Results Study 3: Personality Change Through Self-Improvement or Self-Acceptance

Author

[blinded]

1 Load packages

Show the code
library(renv)
library(tidyverse)
library(broom)
library(labelled)
library(psych)
library(GPArotation)
#library(devtools)
#install_github("cran/multicon") # not on CRAN atm
library(multicon)
library(correlation)
library(careless)
library(corrplot)
library(lavaan)
library(semTools)
library(semPlot)
library(knitr)
library(ggdist)
library(ggforce)
library(cowplot)
library(nortest)
library(lmerTest)
library(effectsize)

2 Data cleaning

Show the code
# source("clean_data_st3.R") 
# for data protection reasons, we provide the cleaned data sets here where potentially sensitive variables were dropped (see "clean_data_st3.R" for details)
base::load("data/df_sbsa3.rda")

Also need this list of the Big Five traits:

Show the code
### BFI-2 - Traits
b5t_extraversion = c("_01", "_06", "_11", "_16", "_21", "_26", 
                     "_31", "_36", "_41", "_46", "_51", "_56")
b5t_agreeableness = c("_02", "_07", "_12", "_17", "_22", "_27", 
                      "_32", "_37", "_42", "_47", "_52", "_57")
b5t_conscientiousness = c("_03", "_08", "_13", "_18", "_23", "_28", 
                          "_33", "_38", "_43", "_48", "_53", "_58")
b5t_neuroticism = c("_04", "_09", "_14", "_19", "_24", "_29", 
                    "_34", "_39", "_44", "_49", "_54", "_59")
b5t_openness = c("_05", "_10", "_15", "_20", "_25", "_30", 
                 "_35", "_40", "_45", "_50", "_55", "_60")
### BFI-2 - Facets
# facets of extraversion
b5f_sociability = c("_01", "_16", "_31", "_46") 
b5f_assertiveness = c("_06", "_21", "_36", "_51")
b5f_energy = c("_11", "_26", "_41", "_56")
# facets of agreeableness
b5f_compassion = c("_02", "_17", "_32", "_47")
b5f_respectfulness = c("_07", "_22", "_37", "_52")
b5f_trust = c("_12", "_27", "_42", "_57")
# facets of conscientiousness
b5f_organization = c("_03", "_18", "_33", "_48")
b5f_productiveness = c("_08", "_23", "_38", "_53")
b5f_responsibility = c("_13", "_28", "_43", "_58")
# facets of neuroticism
b5f_anxiety = c("_04", "_19", "_34", "_49")
b5f_depression = c("_09", "_24", "_39", "_54")
b5f_volatility = c("_14", "_29", "_44", "_59")
# facets of openness
b5f_curiosity = c("_10", "_25", "_40", "_55")
b5f_aesthetic = c("_05", "_20", "_35", "_50")
b5f_imagination = c("_15", "_30", "_45", "_60")

# add keys list (to indicate reverse scoring) - unfortunately this does not work (for the facets) automatically with psych function
keys_extraversion = c("+", "+", "-", "-", "+", "-", "-", "-", "+", "+", "-", "+")
keys_agreeableness = c("+", "+", "-", "-", "-", "+", "+", "-", "-", "-", "+", "+")
keys_conscientiousness = c("-", "-", "+", "+", "-", "-", "+", "+", "+", "-", "+", "-")
keys_neuroticism = c("-", "-", "+", "+", "-", "-", "+", "+", "-", "-", "+", "+")
keys_openness = c("-", "+", "+", "+", "-", "-", "+", "+", "-", "-", "-", "+")
# facets of extraversion
keys_sociability = c("+", "-", "-", "+") 
keys_assertiveness = c("+", "+", "-", "-")
keys_energy = c("-", "-", "+", "+")
# facets of agreeableness
keys_compassion = c("+", "-", "+", "-")
keys_respectfulness = c("+", "-", "-", "+")
keys_trust = c("-", "+", "-", "+")
# facets of conscientiousness
keys_organization = c("-", "+", "+", "-")
keys_productiveness = c("-", "-", "+", "+")
keys_responsibility = c("+", "-", "+", "-")
# facets of neuroticism
keys_anxiety = c("-", "+", "+", "-")
keys_depression = c("-", "-", "+", "+")
keys_volatility = c("+", "-", "-", "+")
# facets of openness
keys_curiosity = c("+", "-", "+", "-")
keys_aesthetic = c("-", "+", "+", "-")
keys_imagination = c("+", "-", "-", "+")

b5_vars <- list(list(b5t_extraversion, keys_extraversion), 
                list(b5t_agreeableness, keys_agreeableness), 
                list(b5t_conscientiousness, keys_conscientiousness), 
                list(b5t_neuroticism, keys_neuroticism), 
                list(b5t_openness, keys_openness),
                list(b5f_sociability, keys_sociability), 
                list(b5f_assertiveness, keys_assertiveness), 
                list(b5f_energy, keys_energy), 
                list(b5f_compassion, keys_compassion), 
                list(b5f_respectfulness, keys_respectfulness), 
                list(b5f_trust, keys_trust),
                list(b5f_organization, keys_organization), 
                list(b5f_productiveness, keys_productiveness), 
                list(b5f_responsibility, keys_responsibility),
                list(b5f_anxiety, keys_anxiety), 
                list(b5f_depression, keys_depression), 
                list(b5f_volatility, keys_volatility),
                list(b5f_curiosity, keys_curiosity), 
                list(b5f_aesthetic, keys_aesthetic), 
                list(b5f_imagination, keys_imagination))
names(b5_vars) = c("extraversion", "agreeableness", "conscientiousness", "neuroticism", "openness",
                   "sociability", "assertiveness", "energy", 
                   "compassion", "respectfulness", "trust",
                   "organization", "productiveness", "responsibility",
                   "anxiety", "depression", "volatility",
                   "curiosity", "aesthetic", "imagination")

3 Create item parcels

3.1 Well-being measures

Reshape data

Show the code
df_sbsa3_wide_wb <- df_sbsa3 %>% 
  arrange(pid, time) %>% 
  select(pid, time, group, starts_with(c("sw06", "ml01", "rs01", "sc01"))) %>% 
  pivot_wider(names_from = time,
              names_sep = "_t",
              values_from = starts_with(c("sw06", "ml01", "rs01", "sc01")))
# colnames(df_sbsa3_wide_wb)

3.1.1 Meaning in life

Check CFA item loadings at T1

cfa_meaning <- '
# Define the latent factors
meaning1 =~ NA*ml01_01_t1 + lambda1*ml01_01_t1 + lambda4*ml01_04_t1 + lambda5*ml01_05_t1 + lambda6*ml01_06_t1 + lambda9*ml01_09_t1

# Intercepts
ml01_01_t1 ~ i1*1
ml01_04_t1 ~ 1
ml01_05_t1 ~ 1
ml01_06_t1 ~ 1
ml01_09_t1 ~ 1

# Unique Variances
ml01_01_t1 ~~ ml01_01_t1
ml01_04_t1 ~~ ml01_04_t1
ml01_05_t1 ~~ ml01_05_t1
ml01_06_t1 ~~ ml01_06_t1
ml01_09_t1 ~~ ml01_09_t1

# Latent Variable Means
meaning1 ~ 0*1

# Latent Variable Variances and Covariance
meaning1 ~~ 1*meaning1
'
fit_cfa_meaning <- cfa(cfa_meaning, data = df_sbsa3_wide_wb, mimic = "mplus", missing="ML")
summary(fit_cfa_meaning, fit.measures = TRUE)
lavaan 0.6.15 ended normally after 16 iterations

  Estimator                                         ML
  Optimization method                           NLMINB
  Number of model parameters                        15

                                                  Used       Total
  Number of observations                           529         530
  Number of missing patterns                         1            

Model Test User Model:
                                                      
  Test statistic                                29.159
  Degrees of freedom                                 5
  P-value (Chi-square)                           0.000

Model Test Baseline Model:

  Test statistic                              1836.506
  Degrees of freedom                                10
  P-value                                        0.000

User Model versus Baseline Model:

  Comparative Fit Index (CFI)                    0.987
  Tucker-Lewis Index (TLI)                       0.974
                                                      
  Robust Comparative Fit Index (CFI)             0.987
  Robust Tucker-Lewis Index (TLI)                0.974

Loglikelihood and Information Criteria:

  Loglikelihood user model (H0)              -4279.708
  Loglikelihood unrestricted model (H1)      -4265.129
                                                      
  Akaike (AIC)                                8589.417
  Bayesian (BIC)                              8653.481
  Sample-size adjusted Bayesian (SABIC)       8605.867

Root Mean Square Error of Approximation:

  RMSEA                                          0.096
  90 Percent confidence interval - lower         0.064
  90 Percent confidence interval - upper         0.130
  P-value H_0: RMSEA <= 0.050                    0.011
  P-value H_0: RMSEA >= 0.080                    0.805
                                                      
  Robust RMSEA                                   0.096
  90 Percent confidence interval - lower         0.064
  90 Percent confidence interval - upper         0.130
  P-value H_0: Robust RMSEA <= 0.050             0.011
  P-value H_0: Robust RMSEA >= 0.080             0.805

Standardized Root Mean Square Residual:

  SRMR                                           0.017

Parameter Estimates:

  Standard errors                             Standard
  Information                                 Observed
  Observed information based on                Hessian

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)
  meaning1 =~                                         
    m01_01_ (lmb1)    1.401    0.060   23.505    0.000
    m01_04_ (lmb4)    1.460    0.058   25.291    0.000
    m01_05_ (lmb5)    1.357    0.059   22.942    0.000
    m01_06_ (lmb6)    1.476    0.061   24.139    0.000
    m01_09_ (lmb9)   -1.394    0.075  -18.646    0.000

Intercepts:
                   Estimate  Std.Err  z-value  P(>|z|)
   .ml01_01_1 (i1)    4.386    0.072   60.690    0.000
   .ml01_04_1         4.087    0.072   56.855    0.000
   .ml01_05_1         4.699    0.071   66.177    0.000
   .ml01_06_1         4.181    0.075   55.922    0.000
   .ml01_09_1         3.711    0.084   44.107    0.000
    meaning1          0.000                           

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)
   .ml01_01_t1        0.799    0.062   12.833    0.000
   .ml01_04_t1        0.602    0.054   11.153    0.000
   .ml01_05_t1        0.827    0.063   13.046    0.000
   .ml01_06_t1        0.780    0.064   12.222    0.000
   .ml01_09_t1        1.802    0.122   14.743    0.000
    meaning1          1.000                           
tidy(fit_cfa_meaning) %>% filter(str_detect(label, "lambda")) %>% mutate(abs_loading = abs(estimate)) %>% arrange(desc(abs_loading)) %>% select(1:5, std.all)
# A tibble: 5 × 6
  term                   op    label   estimate std.error std.all
  <chr>                  <chr> <chr>      <dbl>     <dbl>   <dbl>
1 meaning1 =~ ml01_06_t1 =~    lambda6     1.48    0.0611   0.858
2 meaning1 =~ ml01_04_t1 =~    lambda4     1.46    0.0577   0.883
3 meaning1 =~ ml01_01_t1 =~    lambda1     1.40    0.0596   0.843
4 meaning1 =~ ml01_09_t1 =~    lambda9    -1.39    0.0747  -0.720
5 meaning1 =~ ml01_05_t1 =~    lambda5     1.36    0.0591   0.831

Fit is good (except for RMSEA, but close)! We don’t need item parcels for meaning in life (5-item presence of meaning subscale).

Also check search for meaning subscale:

cfa_search <- '
# Define the latent factors
search1 =~ NA*ml01_02_t1 + lambda1*ml01_02_t1 + lambda4*ml01_03_t1 + lambda5*ml01_07_t1 + lambda6*ml01_08_t1 + lambda9*ml01_10_t1

# Intercepts
ml01_02_t1 ~ i1*1
ml01_03_t1 ~ 1
ml01_07_t1 ~ 1
ml01_08_t1 ~ 1
ml01_10_t1 ~ 1

# Unique Variances
ml01_02_t1 ~~ ml01_02_t1
ml01_03_t1 ~~ ml01_03_t1
ml01_07_t1 ~~ ml01_07_t1
ml01_08_t1 ~~ ml01_08_t1
ml01_10_t1 ~~ ml01_10_t1

# Latent Variable Means
search1 ~ 0*1

# Latent Variable Variances and Covariance
search1 ~~ 1*search1
'
fit_cfa_search <- cfa(cfa_search, data = df_sbsa3_wide_wb, mimic = "mplus", missing="ML")
summary(fit_cfa_search, fit.measures = TRUE)
lavaan 0.6.15 ended normally after 16 iterations

  Estimator                                         ML
  Optimization method                           NLMINB
  Number of model parameters                        15

                                                  Used       Total
  Number of observations                           529         530
  Number of missing patterns                         1            

Model Test User Model:
                                                      
  Test statistic                                69.898
  Degrees of freedom                                 5
  P-value (Chi-square)                           0.000

Model Test Baseline Model:

  Test statistic                              1818.781
  Degrees of freedom                                10
  P-value                                        0.000

User Model versus Baseline Model:

  Comparative Fit Index (CFI)                    0.964
  Tucker-Lewis Index (TLI)                       0.928
                                                      
  Robust Comparative Fit Index (CFI)             0.964
  Robust Tucker-Lewis Index (TLI)                0.928

Loglikelihood and Information Criteria:

  Loglikelihood user model (H0)              -4192.882
  Loglikelihood unrestricted model (H1)      -4157.933
                                                      
  Akaike (AIC)                                8415.764
  Bayesian (BIC)                              8479.829
  Sample-size adjusted Bayesian (SABIC)       8432.214

Root Mean Square Error of Approximation:

  RMSEA                                          0.157
  90 Percent confidence interval - lower         0.125
  90 Percent confidence interval - upper         0.190
  P-value H_0: RMSEA <= 0.050                    0.000
  P-value H_0: RMSEA >= 0.080                    1.000
                                                      
  Robust RMSEA                                   0.157
  90 Percent confidence interval - lower         0.125
  90 Percent confidence interval - upper         0.190
  P-value H_0: Robust RMSEA <= 0.050             0.000
  P-value H_0: Robust RMSEA >= 0.080             1.000

Standardized Root Mean Square Residual:

  SRMR                                           0.027

Parameter Estimates:

  Standard errors                             Standard
  Information                                 Observed
  Observed information based on                Hessian

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)
  search1 =~                                          
    m01_02_ (lmb1)    1.246    0.059   21.092    0.000
    m01_03_ (lmb4)    1.348    0.059   22.725    0.000
    m01_07_ (lmb5)    1.319    0.058   22.803    0.000
    m01_08_ (lmb6)    1.444    0.059   24.579    0.000
    m01_10_ (lmb9)    1.441    0.065   22.124    0.000

Intercepts:
                   Estimate  Std.Err  z-value  P(>|z|)
   .ml01_02_1 (i1)    5.178    0.069   75.523    0.000
   .ml01_03_1         5.002    0.071   70.793    0.000
   .ml01_07_1         4.888    0.069   70.633    0.000
   .ml01_08_1         4.843    0.072   67.143    0.000
   .ml01_10_1         4.677    0.077   60.690    0.000
    search1           0.000                           

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)
   .ml01_02_t1        0.935    0.070   13.385    0.000
   .ml01_03_t1        0.824    0.066   12.516    0.000
   .ml01_07_t1        0.795    0.061   12.948    0.000
   .ml01_08_t1        0.666    0.059   11.199    0.000
   .ml01_10_t1        1.066    0.080   13.248    0.000
    search1           1.000                           
tidy(fit_cfa_search) %>% filter(str_detect(label, "lambda")) %>% mutate(abs_loading = abs(estimate)) %>% arrange(desc(abs_loading)) %>% select(1:5, std.all)
# A tibble: 5 × 6
  term                  op    label   estimate std.error std.all
  <chr>                 <chr> <chr>      <dbl>     <dbl>   <dbl>
1 search1 =~ ml01_08_t1 =~    lambda6     1.44    0.0588   0.871
2 search1 =~ ml01_10_t1 =~    lambda9     1.44    0.0651   0.813
3 search1 =~ ml01_03_t1 =~    lambda4     1.35    0.0593   0.829
4 search1 =~ ml01_07_t1 =~    lambda5     1.32    0.0578   0.828
5 search1 =~ ml01_02_t1 =~    lambda1     1.25    0.0591   0.790

Also good fit (except for RMSEA again…).

3.1.2 Self-esteem

Check CFA item loadings at T1

cfa_selfes <- '
# Define the latent factors
selfes1 =~ NA*rs01_01_t1 + lambda1*rs01_01_t1 + lambda2*rs01_02_t1 + lambda3*rs01_03_t1 + lambda4*rs01_04_t1 + lambda5*rs01_05_t1 + lambda6*rs01_06_t1 + lambda7*rs01_07_t1 + lambda8*rs01_08_t1 + lambda9*rs01_09_t1 + lambda10*rs01_10_t1

# Intercepts
rs01_01_t1 ~ i1*1
rs01_02_t1 ~ 1
rs01_03_t1 ~ 1
rs01_04_t1 ~ 1
rs01_05_t1 ~ 1
rs01_06_t1 ~ 1
rs01_07_t1 ~ 1
rs01_08_t1 ~ 1
rs01_09_t1 ~ 1
rs01_10_t1 ~ 1

# Unique Variances
rs01_01_t1 ~~ rs01_01_t1
rs01_02_t1 ~~ rs01_02_t1
rs01_03_t1 ~~ rs01_03_t1
rs01_04_t1 ~~ rs01_04_t1
rs01_05_t1 ~~ rs01_05_t1
rs01_06_t1 ~~ rs01_06_t1
rs01_07_t1 ~~ rs01_07_t1
rs01_08_t1 ~~ rs01_08_t1
rs01_09_t1 ~~ rs01_09_t1
rs01_10_t1 ~~ rs01_10_t1

# Latent Variable Means
selfes1 ~ 0*1

# Latent Variable Variances and Covariance
selfes1 ~~ 1*selfes1
'
fit_cfa_selfes <- cfa(cfa_selfes, data = df_sbsa3_wide_wb, mimic = "mplus", missing="ML")
summary(fit_cfa_selfes, fit.measures = TRUE)
lavaan 0.6.15 ended normally after 16 iterations

  Estimator                                         ML
  Optimization method                           NLMINB
  Number of model parameters                        30

                                                  Used       Total
  Number of observations                           529         530
  Number of missing patterns                         1            

Model Test User Model:
                                                      
  Test statistic                               312.036
  Degrees of freedom                                35
  P-value (Chi-square)                           0.000

Model Test Baseline Model:

  Test statistic                              2853.989
  Degrees of freedom                                45
  P-value                                        0.000

User Model versus Baseline Model:

  Comparative Fit Index (CFI)                    0.901
  Tucker-Lewis Index (TLI)                       0.873
                                                      
  Robust Comparative Fit Index (CFI)             0.901
  Robust Tucker-Lewis Index (TLI)                0.873

Loglikelihood and Information Criteria:

  Loglikelihood user model (H0)              -7113.195
  Loglikelihood unrestricted model (H1)      -6957.177
                                                      
  Akaike (AIC)                               14286.389
  Bayesian (BIC)                             14414.519
  Sample-size adjusted Bayesian (SABIC)      14319.290

Root Mean Square Error of Approximation:

  RMSEA                                          0.122
  90 Percent confidence interval - lower         0.110
  90 Percent confidence interval - upper         0.135
  P-value H_0: RMSEA <= 0.050                    0.000
  P-value H_0: RMSEA >= 0.080                    1.000
                                                      
  Robust RMSEA                                   0.122
  90 Percent confidence interval - lower         0.110
  90 Percent confidence interval - upper         0.135
  P-value H_0: Robust RMSEA <= 0.050             0.000
  P-value H_0: Robust RMSEA >= 0.080             1.000

Standardized Root Mean Square Residual:

  SRMR                                           0.058

Parameter Estimates:

  Standard errors                             Standard
  Information                                 Observed
  Observed information based on                Hessian

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)
  selfes1 =~                                          
    r01_01_ (lmb1)    0.865    0.046   18.773    0.000
    r01_02_ (lmb2)   -1.004    0.052  -19.321    0.000
    r01_03_ (lmb3)    0.518    0.037   14.161    0.000
    r01_04_ (lmb4)    0.604    0.041   14.894    0.000
    r01_05_ (lmb5)   -1.003    0.049  -20.544    0.000
    r01_06_ (lmb6)   -1.141    0.051  -22.462    0.000
    r01_07_ (lmb7)    0.573    0.041   13.926    0.000
    r01_08_ (lmb8)   -0.709    0.054  -13.125    0.000
    r01_09_ (lmb9)   -1.159    0.048  -23.923    0.000
    r01_10_ (lm10)    0.881    0.043   20.303    0.000

Intercepts:
                   Estimate  Std.Err  z-value  P(>|z|)
   .rs01_01_1 (i1)    3.193    0.052   61.496    0.000
   .rs01_02_1         3.178    0.059   53.827    0.000
   .rs01_03_1         3.996    0.039  103.400    0.000
   .rs01_04_1         3.871    0.043   89.531    0.000
   .rs01_05_1         2.711    0.057   47.940    0.000
   .rs01_06_1         3.144    0.061   51.941    0.000
   .rs01_07_1         3.777    0.043   87.144    0.000
   .rs01_08_1         3.554    0.056   62.922    0.000
   .rs01_09_1         2.584    0.059   43.701    0.000
   .rs01_10_1         3.361    0.050   67.200    0.000
    selfes1           0.000                           

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)
   .rs01_01_t1        0.678    0.046   14.663    0.000
   .rs01_02_t1        0.835    0.058   14.477    0.000
   .rs01_03_t1        0.521    0.034   15.473    0.000
   .rs01_04_t1        0.625    0.041   15.390    0.000
   .rs01_05_t1        0.685    0.048   14.172    0.000
   .rs01_06_t1        0.635    0.048   13.220    0.000
   .rs01_07_t1        0.666    0.043   15.534    0.000
   .rs01_08_t1        1.185    0.076   15.667    0.000
   .rs01_09_t1        0.506    0.040   12.566    0.000
   .rs01_10_t1        0.548    0.039   14.207    0.000
    selfes1           1.000                           
tidy(fit_cfa_selfes) %>% filter(str_detect(label, "lambda")) %>% mutate(abs_loading = abs(estimate)) %>% arrange(desc(abs_loading)) %>% select(1:5, std.all)
# A tibble: 10 × 6
   term                  op    label    estimate std.error std.all
   <chr>                 <chr> <chr>       <dbl>     <dbl>   <dbl>
 1 selfes1 =~ rs01_09_t1 =~    lambda9    -1.16     0.0485  -0.852
 2 selfes1 =~ rs01_06_t1 =~    lambda6    -1.14     0.0508  -0.820
 3 selfes1 =~ rs01_02_t1 =~    lambda2    -1.00     0.0520  -0.740
 4 selfes1 =~ rs01_05_t1 =~    lambda5    -1.00     0.0488  -0.772
 5 selfes1 =~ rs01_10_t1 =~    lambda10    0.881    0.0434   0.766
 6 selfes1 =~ rs01_01_t1 =~    lambda1     0.865    0.0461   0.724
 7 selfes1 =~ rs01_08_t1 =~    lambda8    -0.709    0.0540  -0.546
 8 selfes1 =~ rs01_04_t1 =~    lambda4     0.604    0.0405   0.607
 9 selfes1 =~ rs01_07_t1 =~    lambda7     0.573    0.0411   0.574
10 selfes1 =~ rs01_03_t1 =~    lambda3     0.518    0.0366   0.583

Reverse-code and form parcels:

df_sbsa3_wide_wb <- df_sbsa3_wide_wb %>% 
  mutate(rs01_09_t1_r = rs01_09_t1,
         rs01_06_t1_r = rs01_06_t1,
         rs01_02_t1_r = rs01_02_t1,
         rs01_05_t1_r = rs01_05_t1,
         rs01_08_t1_r = rs01_08_t1,
         rs01_09_t2_r = rs01_09_t2,
         rs01_06_t2_r = rs01_06_t2,
         rs01_02_t2_r = rs01_02_t2,
         rs01_05_t2_r = rs01_05_t2,
         rs01_08_t2_r = rs01_08_t2) %>% 
  mutate(across(intersect(starts_with("rs01"), ends_with("_r")), 
                ~ recode(.x, `1` = 5L, `2` = 4L, `3` = 3L, `4` = 2L, `5` = 1L, .default = NA_integer_))) %>% 
  mutate(selfes_par1_t1 = rowMeans(across(c(rs01_09_t1_r, rs01_01_t1, rs01_07_t1, rs01_03_t1)), na.rm=T),
         selfes_par2_t1 = rowMeans(across(c(rs01_06_t1_r, rs01_10_t1, rs01_04_t1)), na.rm=T),
         selfes_par3_t1 = rowMeans(across(c(rs01_02_t1_r, rs01_05_t1_r, rs01_08_t1_r)), na.rm=T),
         selfes_par1_t2 = rowMeans(across(c(rs01_09_t2_r, rs01_01_t2, rs01_07_t2, rs01_03_t2)), na.rm=T),
         selfes_par2_t2 = rowMeans(across(c(rs01_06_t2_r, rs01_10_t2, rs01_04_t2)), na.rm=T),
         selfes_par3_t2 = rowMeans(across(c(rs01_02_t2_r, rs01_05_t2_r, rs01_08_t2_r)), na.rm=T))

3.1.3 Self-concept clarity

Check CFA item loadings at T1

cfa_concept <- '
# Define the latent factors
concept1 =~ NA*sc01_01_t1 + lambda1*sc01_01_t1 + lambda2*sc01_02_t1 + lambda3*sc01_03_t1 + lambda4*sc01_04_t1 + lambda5*sc01_05_t1 + lambda6*sc01_06_t1 + lambda7*sc01_07_t1 + lambda8*sc01_08_t1 + lambda9*sc01_09_t1 + lambda10*sc01_10_t1 + lambda11*sc01_11_t1 + lambda12*sc01_12_t1

# Intercepts
sc01_01_t1 ~ i1*1
sc01_02_t1 ~ 1
sc01_03_t1 ~ 1
sc01_04_t1 ~ 1
sc01_05_t1 ~ 1
sc01_06_t1 ~ 1
sc01_07_t1 ~ 1
sc01_08_t1 ~ 1
sc01_09_t1 ~ 1
sc01_10_t1 ~ 1
sc01_11_t1 ~ 1
sc01_12_t1 ~ 1

# Unique Variances
sc01_01_t1 ~~ sc01_01_t1
sc01_02_t1 ~~ sc01_02_t1
sc01_03_t1 ~~ sc01_03_t1
sc01_04_t1 ~~ sc01_04_t1
sc01_05_t1 ~~ sc01_05_t1
sc01_06_t1 ~~ sc01_06_t1
sc01_07_t1 ~~ sc01_07_t1
sc01_08_t1 ~~ sc01_08_t1
sc01_09_t1 ~~ sc01_09_t1
sc01_10_t1 ~~ sc01_10_t1
sc01_11_t1 ~~ sc01_11_t1
sc01_12_t1 ~~ sc01_12_t1

# Latent Variable Means
concept1 ~ 0*1

# Latent Variable Variances and Covariance
concept1 ~~ 1*concept1
'
fit_cfa_concept <- cfa(cfa_concept, data = df_sbsa3_wide_wb, mimic = "mplus", missing="ML")
summary(fit_cfa_concept, fit.measures = TRUE)
lavaan 0.6.15 ended normally after 15 iterations

  Estimator                                         ML
  Optimization method                           NLMINB
  Number of model parameters                        36

                                                  Used       Total
  Number of observations                           529         530
  Number of missing patterns                         1            

Model Test User Model:
                                                      
  Test statistic                               244.803
  Degrees of freedom                                54
  P-value (Chi-square)                           0.000

Model Test Baseline Model:

  Test statistic                              3007.929
  Degrees of freedom                                66
  P-value                                        0.000

User Model versus Baseline Model:

  Comparative Fit Index (CFI)                    0.935
  Tucker-Lewis Index (TLI)                       0.921
                                                      
  Robust Comparative Fit Index (CFI)             0.935
  Robust Tucker-Lewis Index (TLI)                0.921

Loglikelihood and Information Criteria:

  Loglikelihood user model (H0)              -9254.579
  Loglikelihood unrestricted model (H1)      -9132.177
                                                      
  Akaike (AIC)                               18581.157
  Bayesian (BIC)                             18734.913
  Sample-size adjusted Bayesian (SABIC)      18620.639

Root Mean Square Error of Approximation:

  RMSEA                                          0.082
  90 Percent confidence interval - lower         0.071
  90 Percent confidence interval - upper         0.092
  P-value H_0: RMSEA <= 0.050                    0.000
  P-value H_0: RMSEA >= 0.080                    0.620
                                                      
  Robust RMSEA                                   0.082
  90 Percent confidence interval - lower         0.071
  90 Percent confidence interval - upper         0.092
  P-value H_0: Robust RMSEA <= 0.050             0.000
  P-value H_0: Robust RMSEA >= 0.080             0.620

Standardized Root Mean Square Residual:

  SRMR                                           0.038

Parameter Estimates:

  Standard errors                             Standard
  Information                                 Observed
  Observed information based on                Hessian

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)
  concept1 =~                                         
    s01_01_ (lmb1)    0.936    0.049   19.105    0.000
    s01_02_ (lmb2)    1.030    0.051   20.317    0.000
    s01_03_ (lmb3)    1.000    0.052   19.057    0.000
    s01_04_ (lmb4)    1.044    0.052   20.007    0.000
    s01_05_ (lmb5)    0.844    0.051   16.465    0.000
    s01_06_ (lmb6)    0.081    0.055    1.474    0.140
    s01_07_ (lmb7)    0.736    0.055   13.311    0.000
    s01_08_ (lmb8)    0.976    0.045   21.672    0.000
    s01_09_ (lmb9)    0.966    0.049   19.624    0.000
    s01_10_ (lm10)    0.848    0.053   16.066    0.000
    s01_11_ (lm11)   -0.795    0.046  -17.144    0.000
    s01_12_ (lm12)    0.868    0.053   16.325    0.000

Intercepts:
                   Estimate  Std.Err  z-value  P(>|z|)
   .sc01_01_1 (i1)    3.102    0.055   55.942    0.000
   .sc01_02_1         3.361    0.058   57.564    0.000
   .sc01_03_1         3.208    0.059   54.067    0.000
   .sc01_04_1         2.987    0.060   49.891    0.000
   .sc01_05_1         2.832    0.056   50.670    0.000
   .sc01_06_1         2.941    0.053   55.600    0.000
   .sc01_07_1         2.656    0.058   45.890    0.000
   .sc01_08_1         2.597    0.053   48.970    0.000
   .sc01_09_1         2.696    0.056   47.962    0.000
   .sc01_10_1         2.845    0.057   49.746    0.000
   .sc01_11_1         3.435    0.051   67.250    0.000
   .sc01_12_1         2.945    0.058   50.886    0.000
    concept1          0.000                           

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)
   .sc01_01_t1        0.751    0.052   14.547    0.000
   .sc01_02_t1        0.744    0.053   14.130    0.000
   .sc01_03_t1        0.863    0.059   14.557    0.000
   .sc01_04_t1        0.806    0.056   14.271    0.000
   .sc01_05_t1        0.941    0.062   15.179    0.000
   .sc01_06_t1        1.474    0.091   16.257    0.000
   .sc01_07_t1        1.230    0.079   15.627    0.000
   .sc01_08_t1        0.535    0.039   13.661    0.000
   .sc01_09_t1        0.737    0.051   14.438    0.000
   .sc01_10_t1        1.011    0.067   15.200    0.000
   .sc01_11_t1        0.747    0.050   15.037    0.000
   .sc01_12_t1        1.019    0.067   15.215    0.000
    concept1          1.000                           
tidy(fit_cfa_concept) %>% filter(str_detect(label, "lambda")) %>% mutate(abs_loading = abs(estimate)) %>% arrange(desc(abs_loading)) %>% select(1:5, std.all)
# A tibble: 12 × 6
   term                   op    label    estimate std.error std.all
   <chr>                  <chr> <chr>       <dbl>     <dbl>   <dbl>
 1 concept1 =~ sc01_04_t1 =~    lambda4    1.04      0.0522  0.758 
 2 concept1 =~ sc01_02_t1 =~    lambda2    1.03      0.0507  0.767 
 3 concept1 =~ sc01_03_t1 =~    lambda3    1.00      0.0524  0.732 
 4 concept1 =~ sc01_08_t1 =~    lambda8    0.976     0.0451  0.800 
 5 concept1 =~ sc01_09_t1 =~    lambda9    0.966     0.0492  0.747 
 6 concept1 =~ sc01_01_t1 =~    lambda1    0.936     0.0490  0.734 
 7 concept1 =~ sc01_12_t1 =~    lambda12   0.868     0.0531  0.652 
 8 concept1 =~ sc01_10_t1 =~    lambda10   0.848     0.0528  0.645 
 9 concept1 =~ sc01_05_t1 =~    lambda5    0.844     0.0512  0.656 
10 concept1 =~ sc01_11_t1 =~    lambda11  -0.795     0.0464 -0.677 
11 concept1 =~ sc01_07_t1 =~    lambda7    0.736     0.0553  0.553 
12 concept1 =~ sc01_06_t1 =~    lambda6    0.0812    0.0551  0.0667

Reverse-code and form parcels:

df_sbsa3_wide_wb <- df_sbsa3_wide_wb %>% # based on the item content, it makes more sense here to recode all except sc01_11
  mutate(sc01_01_t1_r = sc01_01_t1,
         sc01_02_t1_r = sc01_02_t1,
         sc01_03_t1_r = sc01_03_t1,
         sc01_04_t1_r = sc01_04_t1,
         sc01_05_t1_r = sc01_05_t1,
         sc01_06_t1_r = sc01_06_t1,
         sc01_07_t1_r = sc01_07_t1,
         sc01_08_t1_r = sc01_08_t1,
         sc01_09_t1_r = sc01_09_t1,
         sc01_10_t1_r = sc01_10_t1,
         sc01_12_t1_r = sc01_12_t1,
         sc01_01_t2_r = sc01_01_t2,
         sc01_02_t2_r = sc01_02_t2,
         sc01_03_t2_r = sc01_03_t2,
         sc01_04_t2_r = sc01_04_t2,
         sc01_05_t2_r = sc01_05_t2,
         sc01_06_t2_r = sc01_06_t2,
         sc01_07_t2_r = sc01_07_t2,
         sc01_08_t2_r = sc01_08_t2,
         sc01_09_t2_r = sc01_09_t2,
         sc01_10_t2_r = sc01_10_t2,
         sc01_12_t2_r = sc01_12_t2) %>% 
  mutate(across(intersect(starts_with("sc01"), ends_with("_r")), 
                ~ recode(.x, `1` = 5L, `2` = 4L, `3` = 3L, `4` = 2L, `5` = 1L, .default = NA_integer_))) %>% 
  mutate(concept_par1_t1 = rowMeans(across(c(sc01_04_t1_r, sc01_01_t1_r, sc01_05_t1_r, sc01_06_t1_r)), na.rm=T),
         concept_par2_t1 = rowMeans(across(c(sc01_02_t1_r, sc01_09_t1_r, sc01_10_t1_r, sc01_07_t1_r)), na.rm=T),
         concept_par3_t1 = rowMeans(across(c(sc01_03_t1_r, sc01_08_t1_r, sc01_12_t1_r, sc01_11_t1)), na.rm=T),
         concept_par1_t2 = rowMeans(across(c(sc01_04_t2_r, sc01_01_t2_r, sc01_05_t2_r, sc01_06_t2_r)), na.rm=T),
         concept_par2_t2 = rowMeans(across(c(sc01_02_t2_r, sc01_09_t2_r, sc01_10_t2_r, sc01_07_t2_r)), na.rm=T),
         concept_par3_t2 = rowMeans(across(c(sc01_03_t2_r, sc01_08_t2_r, sc01_12_t2_r, sc01_11_t2)), na.rm=T))

# replace NaN with regular NA
df_sbsa3_wide_wb <- df_sbsa3_wide_wb %>% 
  mutate_all(~ifelse(is.nan(.), NA, .))

3.2 Big Five

Show the code
# all time points
df_sbsa3_wide_pers <- df_sbsa3 %>% 
  arrange(pid, time) %>% 
  select(pid, group, time, starts_with(c("bf05", "bf06"))) %>% 
  mutate(valid=1) %>% 
  pivot_wider(names_from = time,
              names_sep = "_t",
              values_from = c(valid, starts_with(c("bf05", "bf06"))))
# colnames(df_sbsa3_wide_pers)

3.2.1 Extraversion - current personality

Check CFA item loadings at T1

cfa_extra_curr <- '
# Define the latent factors
extra_curr1 =~ NA*bf05_01_t1 + lambda1*bf05_01_t1 + lambda2*bf05_06_t1 + lambda3*bf05_11_t1 + lambda4*bf05_16_t1 + lambda5*bf05_21_t1 + lambda6*bf05_26_t1 + lambda7*bf05_31_t1 + lambda8*bf05_36_t1 + lambda9*bf05_41_t1 + lambda10*bf05_46_t1 + lambda11*bf05_51_t1 + lambda12*bf05_56_t1

# Intercepts
bf05_01_t1 ~ i1*1
bf05_06_t1 ~ 1
bf05_11_t1 ~ 1
bf05_16_t1 ~ 1
bf05_21_t1 ~ 1
bf05_26_t1 ~ 1
bf05_31_t1 ~ 1
bf05_36_t1 ~ 1
bf05_41_t1 ~ 1
bf05_46_t1 ~ 1
bf05_51_t1 ~ 1
bf05_56_t1 ~ 1

# Unique Variances
bf05_01_t1 ~~ bf05_01_t1
bf05_06_t1 ~~ bf05_06_t1
bf05_11_t1 ~~ bf05_11_t1
bf05_16_t1 ~~ bf05_16_t1
bf05_21_t1 ~~ bf05_21_t1
bf05_26_t1 ~~ bf05_26_t1
bf05_31_t1 ~~ bf05_31_t1
bf05_36_t1 ~~ bf05_36_t1
bf05_41_t1 ~~ bf05_41_t1
bf05_46_t1 ~~ bf05_46_t1
bf05_51_t1 ~~ bf05_51_t1
bf05_56_t1 ~~ bf05_56_t1

# Latent Variable Means
extra_curr1 ~ 0*1

# Latent Variable Variances and Covariance
extra_curr1 ~~ 1*extra_curr1
'
fit_cfa_extra_curr <- cfa(cfa_extra_curr, data = df_sbsa3_wide_pers, mimic = "mplus", missing="ML")
summary(fit_cfa_extra_curr, fit.measures = TRUE)
lavaan 0.6.15 ended normally after 15 iterations

  Estimator                                         ML
  Optimization method                           NLMINB
  Number of model parameters                        36

                                                  Used       Total
  Number of observations                           529         530
  Number of missing patterns                         1            

Model Test User Model:
                                                      
  Test statistic                               586.241
  Degrees of freedom                                54
  P-value (Chi-square)                           0.000

Model Test Baseline Model:

  Test statistic                              2319.986
  Degrees of freedom                                66
  P-value                                        0.000

User Model versus Baseline Model:

  Comparative Fit Index (CFI)                    0.764
  Tucker-Lewis Index (TLI)                       0.711
                                                      
  Robust Comparative Fit Index (CFI)             0.764
  Robust Tucker-Lewis Index (TLI)                0.711

Loglikelihood and Information Criteria:

  Loglikelihood user model (H0)              -9321.205
  Loglikelihood unrestricted model (H1)      -9028.085
                                                      
  Akaike (AIC)                               18714.411
  Bayesian (BIC)                             18868.166
  Sample-size adjusted Bayesian (SABIC)      18753.892

Root Mean Square Error of Approximation:

  RMSEA                                          0.136
  90 Percent confidence interval - lower         0.127
  90 Percent confidence interval - upper         0.147
  P-value H_0: RMSEA <= 0.050                    0.000
  P-value H_0: RMSEA >= 0.080                    1.000
                                                      
  Robust RMSEA                                   0.136
  90 Percent confidence interval - lower         0.127
  90 Percent confidence interval - upper         0.147
  P-value H_0: Robust RMSEA <= 0.050             0.000
  P-value H_0: Robust RMSEA >= 0.080             1.000

Standardized Root Mean Square Residual:

  SRMR                                           0.078

Parameter Estimates:

  Standard errors                             Standard
  Information                                 Observed
  Observed information based on                Hessian

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)
  extra_curr1 =~                                      
    b05_01_ (lmb1)    1.037    0.051   20.495    0.000
    b05_06_ (lmb2)    0.561    0.052   10.811    0.000
    b05_11_ (lmb3)   -0.485    0.051   -9.583    0.000
    b05_16_ (lmb4)   -0.790    0.051  -15.586    0.000
    b05_21_ (lmb5)    0.736    0.055   13.446    0.000
    b05_26_ (lmb6)   -0.611    0.057  -10.709    0.000
    b05_31_ (lmb7)   -0.769    0.047  -16.183    0.000
    b05_36_ (lmb8)   -0.535    0.048  -11.050    0.000
    b05_41_ (lmb9)    0.716    0.047   15.145    0.000
    b05_46_ (lm10)    0.893    0.055   16.239    0.000
    b05_51_ (lm11)   -0.606    0.052  -11.600    0.000
    b05_56_ (lm12)    0.709    0.045   15.859    0.000

Intercepts:
                   Estimate  Std.Err  z-value  P(>|z|)
   .bf05_01_1 (i1)    3.017    0.057   52.480    0.000
   .bf05_06_1         3.223    0.052   62.522    0.000
   .bf05_11_1         2.595    0.050   52.215    0.000
   .bf05_16_1         3.798    0.053   71.597    0.000
   .bf05_21_1         2.885    0.056   51.442    0.000
   .bf05_26_1         2.909    0.056   51.598    0.000
   .bf05_31_1         3.947    0.050   78.443    0.000
   .bf05_36_1         2.966    0.048   61.476    0.000
   .bf05_41_1         3.002    0.049   60.942    0.000
   .bf05_46_1         3.115    0.058   53.301    0.000
   .bf05_51_1         3.059    0.052   58.645    0.000
   .bf05_56_1         3.323    0.047   70.392    0.000
    extr_crr1         0.000                           

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)
   .bf05_01_t1        0.672    0.054   12.502    0.000
   .bf05_06_t1        1.091    0.070   15.561    0.000
   .bf05_11_t1        1.072    0.068   15.733    0.000
   .bf05_16_t1        0.865    0.061   14.247    0.000
   .bf05_21_t1        1.121    0.074   15.049    0.000
   .bf05_26_t1        1.309    0.084   15.523    0.000
   .bf05_31_t1        0.748    0.053   14.180    0.000
   .bf05_36_t1        0.945    0.061   15.528    0.000
   .bf05_41_t1        0.772    0.053   14.467    0.000
   .bf05_46_t1        1.009    0.071   14.261    0.000
   .bf05_51_t1        1.071    0.070   15.353    0.000
   .bf05_56_t1        0.676    0.047   14.326    0.000
    extra_curr1       1.000                           
tidy(fit_cfa_extra_curr) %>% filter(str_detect(label, "lambda")) %>% mutate(abs_loading = abs(estimate)) %>% arrange(desc(abs_loading)) %>% select(1:5, std.all)
# A tibble: 12 × 6
   term                      op    label    estimate std.error std.all
   <chr>                     <chr> <chr>       <dbl>     <dbl>   <dbl>
 1 extra_curr1 =~ bf05_01_t1 =~    lambda1     1.04     0.0506   0.785
 2 extra_curr1 =~ bf05_46_t1 =~    lambda10    0.893    0.0550   0.664
 3 extra_curr1 =~ bf05_16_t1 =~    lambda4    -0.790    0.0507  -0.647
 4 extra_curr1 =~ bf05_31_t1 =~    lambda7    -0.769    0.0475  -0.664
 5 extra_curr1 =~ bf05_21_t1 =~    lambda5     0.736    0.0548   0.571
 6 extra_curr1 =~ bf05_41_t1 =~    lambda9     0.716    0.0472   0.632
 7 extra_curr1 =~ bf05_56_t1 =~    lambda12    0.709    0.0447   0.653
 8 extra_curr1 =~ bf05_26_t1 =~    lambda6    -0.611    0.0570  -0.471
 9 extra_curr1 =~ bf05_51_t1 =~    lambda11   -0.606    0.0523  -0.506
10 extra_curr1 =~ bf05_06_t1 =~    lambda2     0.561    0.0519   0.473
11 extra_curr1 =~ bf05_36_t1 =~    lambda8    -0.535    0.0484  -0.482
12 extra_curr1 =~ bf05_11_t1 =~    lambda3    -0.485    0.0506  -0.424

Reverse-code and form parcels:

df_sbsa3_wide_pers <- df_sbsa3_wide_pers %>% 
  mutate(bf05_31_t1_r = bf05_31_t1,
         bf05_16_t1_r = bf05_16_t1,
         bf05_26_t1_r = bf05_26_t1,
         bf05_51_t1_r = bf05_51_t1,
         bf05_36_t1_r = bf05_36_t1,
         bf05_11_t1_r = bf05_11_t1,
         bf05_31_t2_r = bf05_31_t2,
         bf05_16_t2_r = bf05_16_t2,
         bf05_26_t2_r = bf05_26_t2,
         bf05_51_t2_r = bf05_51_t2,
         bf05_36_t2_r = bf05_36_t2,
         bf05_11_t2_r = bf05_11_t2) %>% 
  mutate(across(c(bf05_31_t1_r, bf05_16_t1_r, bf05_26_t1_r, bf05_51_t1_r, bf05_36_t1_r, bf05_11_t1_r, 
                  bf05_31_t2_r, bf05_16_t2_r, bf05_26_t2_r, bf05_51_t2_r, bf05_36_t2_r, bf05_11_t2_r), 
                ~ recode(.x, `1` = 5L, `2` = 4L, `3` = 3L, `4` = 2L, `5` = 1L, .default = NA_integer_))) %>% 
  mutate(extra_curr_par1_t1 = rowMeans(across(c(bf05_01_t1, bf05_41_t1, bf05_51_t1_r, bf05_11_t1_r)), na.rm=T),
         extra_curr_par2_t1 = rowMeans(across(c(bf05_46_t1, bf05_21_t1, bf05_26_t1_r, bf05_36_t1_r)), na.rm=T),
         extra_curr_par3_t1 = rowMeans(across(c(bf05_16_t1_r, bf05_31_t1_r, bf05_56_t1, bf05_06_t1)), na.rm=T),
         extra_curr_par1_t2 = rowMeans(across(c(bf05_01_t2, bf05_41_t2, bf05_51_t2_r, bf05_11_t2_r)), na.rm=T),
         extra_curr_par2_t2 = rowMeans(across(c(bf05_46_t2, bf05_21_t2, bf05_26_t2_r, bf05_36_t2_r)), na.rm=T),
         extra_curr_par3_t2 = rowMeans(across(c(bf05_16_t2_r, bf05_31_t2_r, bf05_56_t2, bf05_06_t2)), na.rm=T))

3.2.2 Extraversion - ideal personality

Check CFA item loadings at T1

cfa_extra_ideal <- '
# Define the latent factors
extra_ideal1 =~ NA*bf06_01_t1 + lambda1*bf06_01_t1 + lambda2*bf06_06_t1 + lambda3*bf06_11_t1 + lambda4*bf06_16_t1 + lambda5*bf06_21_t1 + lambda6*bf06_26_t1 + lambda7*bf06_31_t1 + lambda8*bf06_36_t1 + lambda9*bf06_41_t1 + lambda10*bf06_46_t1 + lambda11*bf06_51_t1 + lambda12*bf06_56_t1

# Intercepts
bf06_01_t1 ~ i1*1
bf06_06_t1 ~ 1
bf06_11_t1 ~ 1
bf06_16_t1 ~ 1
bf06_21_t1 ~ 1
bf06_26_t1 ~ 1
bf06_31_t1 ~ 1
bf06_36_t1 ~ 1
bf06_41_t1 ~ 1
bf06_46_t1 ~ 1
bf06_51_t1 ~ 1
bf06_56_t1 ~ 1

# Unique Variances
bf06_01_t1 ~~ bf06_01_t1
bf06_06_t1 ~~ bf06_06_t1
bf06_11_t1 ~~ bf06_11_t1
bf06_16_t1 ~~ bf06_16_t1
bf06_21_t1 ~~ bf06_21_t1
bf06_26_t1 ~~ bf06_26_t1
bf06_31_t1 ~~ bf06_31_t1
bf06_36_t1 ~~ bf06_36_t1
bf06_41_t1 ~~ bf06_41_t1
bf06_46_t1 ~~ bf06_46_t1
bf06_51_t1 ~~ bf06_51_t1
bf06_56_t1 ~~ bf06_56_t1

# Latent Variable Means
extra_ideal1 ~ 0*1

# Latent Variable Variances and Covariance
extra_ideal1 ~~ 1*extra_ideal1
'
fit_cfa_extra_ideal <- cfa(cfa_extra_ideal, data = df_sbsa3_wide_pers %>% filter(group=="Group 1"), 
                           mimic = "mplus", missing="ML")
summary(fit_cfa_extra_ideal, fit.measures = TRUE)
lavaan 0.6.15 ended normally after 16 iterations

  Estimator                                         ML
  Optimization method                           NLMINB
  Number of model parameters                        36

                                                  Used       Total
  Number of observations                           174         175
  Number of missing patterns                         1            

Model Test User Model:
                                                      
  Test statistic                               108.405
  Degrees of freedom                                54
  P-value (Chi-square)                           0.000

Model Test Baseline Model:

  Test statistic                               365.082
  Degrees of freedom                                66
  P-value                                        0.000

User Model versus Baseline Model:

  Comparative Fit Index (CFI)                    0.818
  Tucker-Lewis Index (TLI)                       0.778
                                                      
  Robust Comparative Fit Index (CFI)             0.818
  Robust Tucker-Lewis Index (TLI)                0.778

Loglikelihood and Information Criteria:

  Loglikelihood user model (H0)              -2843.995
  Loglikelihood unrestricted model (H1)      -2789.793
                                                      
  Akaike (AIC)                                5759.991
  Bayesian (BIC)                              5873.717
  Sample-size adjusted Bayesian (SABIC)       5759.718

Root Mean Square Error of Approximation:

  RMSEA                                          0.076
  90 Percent confidence interval - lower         0.055
  90 Percent confidence interval - upper         0.097
  P-value H_0: RMSEA <= 0.050                    0.022
  P-value H_0: RMSEA >= 0.080                    0.396
                                                      
  Robust RMSEA                                   0.076
  90 Percent confidence interval - lower         0.055
  90 Percent confidence interval - upper         0.097
  P-value H_0: Robust RMSEA <= 0.050             0.022
  P-value H_0: Robust RMSEA >= 0.080             0.396

Standardized Root Mean Square Residual:

  SRMR                                           0.063

Parameter Estimates:

  Standard errors                             Standard
  Information                                 Observed
  Observed information based on                Hessian

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)
  extra_ideal1 =~                                     
    b06_01_ (lmb1)    0.549    0.070    7.808    0.000
    b06_06_ (lmb2)    0.318    0.075    4.225    0.000
    b06_11_ (lmb3)   -0.261    0.109   -2.392    0.017
    b06_16_ (lmb4)   -0.630    0.098   -6.443    0.000
    b06_21_ (lmb5)    0.512    0.086    5.935    0.000
    b06_26_ (lmb6)   -0.397    0.071   -5.598    0.000
    b06_31_ (lmb7)   -0.717    0.086   -8.315    0.000
    b06_36_ (lmb8)   -0.478    0.095   -5.025    0.000
    b06_41_ (lmb9)    0.429    0.066    6.458    0.000
    b06_46_ (lm10)    0.375    0.081    4.612    0.000
    b06_51_ (lm11)   -0.440    0.090   -4.883    0.000
    b06_56_ (lm12)    0.300    0.070    4.292    0.000

Intercepts:
                   Estimate  Std.Err  z-value  P(>|z|)
   .bf06_01_1 (i1)    4.092    0.068   60.062    0.000
   .bf06_06_1         4.161    0.068   60.865    0.000
   .bf06_11_1         2.109    0.096   21.975    0.000
   .bf06_16_1         2.943    0.092   31.911    0.000
   .bf06_21_1         3.960    0.079   49.891    0.000
   .bf06_26_1         1.632    0.066   24.865    0.000
   .bf06_31_1         2.287    0.084   27.230    0.000
   .bf06_36_1         2.190    0.087   25.082    0.000
   .bf06_41_1         4.402    0.062   70.564    0.000
   .bf06_46_1         3.730    0.074   50.326    0.000
   .bf06_51_1         2.356    0.082   28.572    0.000
   .bf06_56_1         4.218    0.063   66.535    0.000
    extra_dl1         0.000                           

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)
   .bf06_01_t1        0.506    0.066    7.676    0.000
   .bf06_06_t1        0.712    0.080    8.936    0.000
   .bf06_11_t1        1.535    0.167    9.193    0.000
   .bf06_16_t1        1.082    0.130    8.310    0.000
   .bf06_21_t1        0.834    0.100    8.341    0.000
   .bf06_26_t1        0.592    0.069    8.579    0.000
   .bf06_31_t1        0.713    0.098    7.248    0.000
   .bf06_36_t1        1.098    0.125    8.749    0.000
   .bf06_41_t1        0.493    0.060    8.262    0.000
   .bf06_46_t1        0.815    0.092    8.842    0.000
   .bf06_51_t1        0.990    0.113    8.778    0.000
   .bf06_56_t1        0.609    0.068    8.916    0.000
    extra_ideal1      1.000                           
tidy(fit_cfa_extra_ideal) %>% filter(str_detect(label, "lambda")) %>% mutate(abs_loading = abs(estimate)) %>% arrange(desc(abs_loading)) %>% select(1:5, std.all)
# A tibble: 12 × 6
   term                       op    label    estimate std.error std.all
   <chr>                      <chr> <chr>       <dbl>     <dbl>   <dbl>
 1 extra_ideal1 =~ bf06_31_t1 =~    lambda7    -0.717    0.0863  -0.647
 2 extra_ideal1 =~ bf06_16_t1 =~    lambda4    -0.630    0.0978  -0.518
 3 extra_ideal1 =~ bf06_01_t1 =~    lambda1     0.549    0.0703   0.611
 4 extra_ideal1 =~ bf06_21_t1 =~    lambda5     0.512    0.0863   0.489
 5 extra_ideal1 =~ bf06_36_t1 =~    lambda8    -0.478    0.0951  -0.415
 6 extra_ideal1 =~ bf06_51_t1 =~    lambda11   -0.440    0.0901  -0.404
 7 extra_ideal1 =~ bf06_41_t1 =~    lambda9     0.429    0.0664   0.521
 8 extra_ideal1 =~ bf06_26_t1 =~    lambda6    -0.397    0.0709  -0.458
 9 extra_ideal1 =~ bf06_46_t1 =~    lambda10    0.375    0.0814   0.384
10 extra_ideal1 =~ bf06_06_t1 =~    lambda2     0.318    0.0753   0.353
11 extra_ideal1 =~ bf06_56_t1 =~    lambda12    0.300    0.0699   0.359
12 extra_ideal1 =~ bf06_11_t1 =~    lambda3    -0.261    0.109   -0.206

Reverse-code and form parcels:

df_sbsa3_wide_pers <- df_sbsa3_wide_pers %>% 
  mutate(bf06_31_t1_r = bf06_31_t1,
         bf06_16_t1_r = bf06_16_t1,
         bf06_26_t1_r = bf06_26_t1,
         bf06_51_t1_r = bf06_51_t1,
         bf06_36_t1_r = bf06_36_t1,
         bf06_11_t1_r = bf06_11_t1,
         bf06_31_t2_r = bf06_31_t2,
         bf06_16_t2_r = bf06_16_t2,
         bf06_26_t2_r = bf06_26_t2,
         bf06_51_t2_r = bf06_51_t2,
         bf06_36_t2_r = bf06_36_t2,
         bf06_11_t2_r = bf06_11_t2) %>% 
  mutate(across(c(bf06_31_t1_r, bf06_16_t1_r, bf06_26_t1_r, bf06_51_t1_r, bf06_36_t1_r, bf06_11_t1_r, 
                  bf06_31_t2_r, bf06_16_t2_r, bf06_26_t2_r, bf06_51_t2_r, bf06_36_t2_r, bf06_11_t2_r), 
                ~ recode(.x, `1` = 5L, `2` = 4L, `3` = 3L, `4` = 2L, `5` = 1L, .default = NA_integer_))) %>% 
  mutate(extra_ideal_par1_t1 = rowMeans(across(c(bf06_31_t1_r, bf06_51_t1_r, bf06_46_t1, bf06_11_t1_r)), na.rm=T),
         extra_ideal_par2_t1 = rowMeans(across(c(bf06_16_t1_r, bf06_36_t1_r, bf06_26_t1_r, bf06_56_t1)), na.rm=T),
         extra_ideal_par3_t1 = rowMeans(across(c(bf06_01_t1, bf06_21_t1, bf06_41_t1, bf06_06_t1)), na.rm=T),
         extra_ideal_par1_t2 = rowMeans(across(c(bf06_31_t2_r, bf06_51_t2_r, bf06_46_t2, bf06_11_t2_r)), na.rm=T),
         extra_ideal_par2_t2 = rowMeans(across(c(bf06_16_t2_r, bf06_36_t2_r, bf06_26_t2_r, bf06_56_t2)), na.rm=T),
         extra_ideal_par3_t2 = rowMeans(across(c(bf06_01_t2, bf06_21_t2, bf06_41_t2, bf06_06_t2)), na.rm=T))

3.2.3 Agreeableness - current personality

Check CFA item loadings at T1

cfa_agree_curr <- '
# Define the latent factors
agree_curr1 =~ NA*bf05_02_t1 + lambda1*bf05_02_t1 + lambda2*bf05_07_t1 + lambda3*bf05_12_t1 + lambda4*bf05_17_t1 + lambda5*bf05_22_t1 + lambda6*bf05_27_t1 + lambda7*bf05_32_t1 + lambda8*bf05_37_t1 + lambda9*bf05_42_t1 + lambda10*bf05_47_t1 + lambda11*bf05_52_t1 + lambda12*bf05_57_t1

# Intercepts
bf05_02_t1 ~ i1*1
bf05_07_t1 ~ 1
bf05_12_t1 ~ 1
bf05_17_t1 ~ 1
bf05_22_t1 ~ 1
bf05_27_t1 ~ 1
bf05_32_t1 ~ 1
bf05_37_t1 ~ 1
bf05_42_t1 ~ 1
bf05_47_t1 ~ 1
bf05_52_t1 ~ 1
bf05_57_t1 ~ 1

# Unique Variances
bf05_02_t1 ~~ bf05_02_t1
bf05_07_t1 ~~ bf05_07_t1
bf05_12_t1 ~~ bf05_12_t1
bf05_17_t1 ~~ bf05_17_t1
bf05_22_t1 ~~ bf05_22_t1
bf05_27_t1 ~~ bf05_27_t1
bf05_32_t1 ~~ bf05_32_t1
bf05_37_t1 ~~ bf05_37_t1
bf05_42_t1 ~~ bf05_42_t1
bf05_47_t1 ~~ bf05_47_t1
bf05_52_t1 ~~ bf05_52_t1
bf05_57_t1 ~~ bf05_57_t1

# Latent Variable Means
agree_curr1 ~ 0*1

# Latent Variable Variances and Covariance
agree_curr1 ~~ 1*agree_curr1
'
fit_cfa_agree_curr <- cfa(cfa_agree_curr, data = df_sbsa3_wide_pers, mimic = "mplus", missing="ML")
summary(fit_cfa_agree_curr, fit.measures = TRUE)
lavaan 0.6.15 ended normally after 20 iterations

  Estimator                                         ML
  Optimization method                           NLMINB
  Number of model parameters                        36

                                                  Used       Total
  Number of observations                           529         530
  Number of missing patterns                         1            

Model Test User Model:
                                                      
  Test statistic                               308.188
  Degrees of freedom                                54
  P-value (Chi-square)                           0.000

Model Test Baseline Model:

  Test statistic                              1434.558
  Degrees of freedom                                66
  P-value                                        0.000

User Model versus Baseline Model:

  Comparative Fit Index (CFI)                    0.814
  Tucker-Lewis Index (TLI)                       0.773
                                                      
  Robust Comparative Fit Index (CFI)             0.814
  Robust Tucker-Lewis Index (TLI)                0.773

Loglikelihood and Information Criteria:

  Loglikelihood user model (H0)              -8687.839
  Loglikelihood unrestricted model (H1)      -8533.745
                                                      
  Akaike (AIC)                               17447.677
  Bayesian (BIC)                             17601.433
  Sample-size adjusted Bayesian (SABIC)      17487.159

Root Mean Square Error of Approximation:

  RMSEA                                          0.094
  90 Percent confidence interval - lower         0.084
  90 Percent confidence interval - upper         0.105
  P-value H_0: RMSEA <= 0.050                    0.000
  P-value H_0: RMSEA >= 0.080                    0.990
                                                      
  Robust RMSEA                                   0.094
  90 Percent confidence interval - lower         0.084
  90 Percent confidence interval - upper         0.105
  P-value H_0: Robust RMSEA <= 0.050             0.000
  P-value H_0: Robust RMSEA >= 0.080             0.990

Standardized Root Mean Square Residual:

  SRMR                                           0.061

Parameter Estimates:

  Standard errors                             Standard
  Information                                 Observed
  Observed information based on                Hessian

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)
  agree_curr1 =~                                      
    b05_02_ (lmb1)    0.489    0.036   13.767    0.000
    b05_07_ (lmb2)    0.404    0.029   14.046    0.000
    b05_12_ (lmb3)   -0.626    0.050  -12.413    0.000
    b05_17_ (lmb4)   -0.242    0.070   -3.472    0.001
    b05_22_ (lmb5)   -0.545    0.053  -10.203    0.000
    b05_27_ (lmb6)    0.615    0.051   11.961    0.000
    b05_32_ (lmb7)    0.504    0.041   12.371    0.000
    b05_37_ (lmb8)   -0.674    0.054  -12.558    0.000
    b05_42_ (lmb9)   -0.368    0.054   -6.781    0.000
    b05_47_ (lm10)   -0.714    0.053  -13.414    0.000
    b05_52_ (lm11)    0.420    0.032   13.050    0.000
    b05_57_ (lm12)    0.611    0.051   12.068    0.000

Intercepts:
                   Estimate  Std.Err  z-value  P(>|z|)
   .bf05_02_1 (i1)    4.321    0.036  121.608    0.000
   .bf05_07_1         4.505    0.029  156.371    0.000
   .bf05_12_1         2.802    0.050   56.576    0.000
   .bf05_17_1         2.699    0.064   42.458    0.000
   .bf05_22_1         2.227    0.051   43.517    0.000
   .bf05_27_1         3.637    0.050   72.600    0.000
   .bf05_32_1         4.021    0.040  100.182    0.000
   .bf05_37_1         2.527    0.053   47.877    0.000
   .bf05_42_1         3.376    0.050   67.691    0.000
   .bf05_47_1         2.457    0.053   46.331    0.000
   .bf05_52_1         4.282    0.032  135.111    0.000
   .bf05_57_1         3.302    0.049   67.014    0.000
    agre_crr1         0.000                           

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)
   .bf05_02_t1        0.429    0.030   14.208    0.000
   .bf05_07_t1        0.276    0.020   14.020    0.000
   .bf05_12_t1        0.906    0.062   14.651    0.000
   .bf05_17_t1        2.080    0.129   16.165    0.000
   .bf05_22_t1        1.088    0.071   15.248    0.000
   .bf05_27_t1        0.950    0.064   14.751    0.000
   .bf05_32_t1        0.598    0.041   14.738    0.000
   .bf05_37_t1        1.020    0.070   14.593    0.000
   .bf05_42_t1        1.181    0.075   15.802    0.000
   .bf05_47_t1        0.979    0.068   14.363    0.000
   .bf05_52_t1        0.355    0.025   14.309    0.000
   .bf05_57_t1        0.911    0.062   14.646    0.000
    agree_curr1       1.000                           
tidy(fit_cfa_agree_curr) %>% filter(str_detect(label, "lambda")) %>% mutate(abs_loading = abs(estimate)) %>% arrange(desc(abs_loading)) %>% select(1:5, std.all)
# A tibble: 12 × 6
   term                      op    label    estimate std.error std.all
   <chr>                     <chr> <chr>       <dbl>     <dbl>   <dbl>
 1 agree_curr1 =~ bf05_47_t1 =~    lambda10   -0.714    0.0532  -0.585
 2 agree_curr1 =~ bf05_37_t1 =~    lambda8    -0.674    0.0537  -0.555
 3 agree_curr1 =~ bf05_12_t1 =~    lambda3    -0.626    0.0504  -0.549
 4 agree_curr1 =~ bf05_27_t1 =~    lambda6     0.615    0.0514   0.533
 5 agree_curr1 =~ bf05_57_t1 =~    lambda12    0.611    0.0506   0.539
 6 agree_curr1 =~ bf05_22_t1 =~    lambda5    -0.545    0.0534  -0.463
 7 agree_curr1 =~ bf05_32_t1 =~    lambda7     0.504    0.0407   0.546
 8 agree_curr1 =~ bf05_02_t1 =~    lambda1     0.489    0.0355   0.598
 9 agree_curr1 =~ bf05_52_t1 =~    lambda11    0.420    0.0322   0.576
10 agree_curr1 =~ bf05_07_t1 =~    lambda2     0.404    0.0288   0.610
11 agree_curr1 =~ bf05_42_t1 =~    lambda9    -0.368    0.0542  -0.320
12 agree_curr1 =~ bf05_17_t1 =~    lambda4    -0.242    0.0696  -0.165

Reverse-code and form parcels:

df_sbsa3_wide_pers <- df_sbsa3_wide_pers %>% 
  mutate(bf05_47_t1_r = bf05_47_t1,
         bf05_37_t1_r = bf05_37_t1,
         bf05_22_t1_r = bf05_22_t1,
         bf05_12_t1_r = bf05_12_t1,
         bf05_42_t1_r = bf05_42_t1,
         bf05_17_t1_r = bf05_17_t1,
         bf05_47_t2_r = bf05_47_t2,
         bf05_37_t2_r = bf05_37_t2,
         bf05_22_t2_r = bf05_22_t2,
         bf05_12_t2_r = bf05_12_t2,
         bf05_42_t2_r = bf05_42_t2,
         bf05_17_t2_r = bf05_17_t2) %>% 
  mutate(across(c(bf05_47_t1_r, bf05_37_t1_r, bf05_22_t1_r, bf05_12_t1_r, bf05_42_t1_r, bf05_17_t1_r, 
                  bf05_47_t2_r, bf05_37_t2_r, bf05_22_t2_r, bf05_12_t2_r, bf05_42_t2_r, bf05_17_t2_r), 
                ~ recode(.x, `1` = 5L, `2` = 4L, `3` = 3L, `4` = 2L, `5` = 1L, .default = NA_integer_))) %>% 
  mutate(agree_curr_par1_t1 = rowMeans(across(c(bf05_47_t1_r, bf05_22_t1_r, bf05_52_t1, bf05_17_t1_r)), na.rm=T),
         agree_curr_par2_t1 = rowMeans(across(c(bf05_37_t1_r, bf05_57_t1, bf05_02_t1, bf05_42_t1_r)), na.rm=T),
         agree_curr_par3_t1 = rowMeans(across(c(bf05_12_t1_r, bf05_27_t1, bf05_32_t1, bf05_07_t1)), na.rm=T),
         agree_curr_par1_t2 = rowMeans(across(c(bf05_47_t2_r, bf05_22_t2_r, bf05_52_t2, bf05_17_t2_r)), na.rm=T),
         agree_curr_par2_t2 = rowMeans(across(c(bf05_37_t2_r, bf05_57_t2, bf05_02_t2, bf05_42_t2_r)), na.rm=T),
         agree_curr_par3_t2 = rowMeans(across(c(bf05_12_t2_r, bf05_27_t2, bf05_32_t2, bf05_07_t2)), na.rm=T))

3.2.4 Agreeableness - ideal personality

Check CFA item loadings at T1

cfa_agree_ideal <- '
# Define the latent factors
agree_ideal1 =~ NA*bf06_02_t1 + lambda1*bf06_02_t1 + lambda2*bf06_07_t1 + lambda3*bf06_12_t1 + lambda4*bf06_17_t1 + lambda5*bf06_22_t1 + lambda6*bf06_27_t1 + lambda7*bf06_32_t1 + lambda8*bf06_37_t1 + lambda9*bf06_42_t1 + lambda10*bf06_47_t1 + lambda11*bf06_52_t1 + lambda12*bf06_57_t1

# Intercepts
bf06_02_t1 ~ i1*1
bf06_07_t1 ~ 1
bf06_12_t1 ~ 1
bf06_17_t1 ~ 1
bf06_22_t1 ~ 1
bf06_27_t1 ~ 1
bf06_32_t1 ~ 1
bf06_37_t1 ~ 1
bf06_42_t1 ~ 1
bf06_47_t1 ~ 1
bf06_52_t1 ~ 1
bf06_57_t1 ~ 1

# Unique Variances
bf06_02_t1 ~~ bf06_02_t1
bf06_07_t1 ~~ bf06_07_t1
bf06_12_t1 ~~ bf06_12_t1
bf06_17_t1 ~~ bf06_17_t1
bf06_22_t1 ~~ bf06_22_t1
bf06_27_t1 ~~ bf06_27_t1
bf06_32_t1 ~~ bf06_32_t1
bf06_37_t1 ~~ bf06_37_t1
bf06_42_t1 ~~ bf06_42_t1
bf06_47_t1 ~~ bf06_47_t1
bf06_52_t1 ~~ bf06_52_t1
bf06_57_t1 ~~ bf06_57_t1

# Latent Variable Means
agree_ideal1 ~ 0*1

# Latent Variable Variances and Covariance
agree_ideal1 ~~ 1*agree_ideal1
'
fit_cfa_agree_ideal <- cfa(cfa_agree_ideal, data = df_sbsa3_wide_pers %>% filter(group=="Group 1"), 
                           mimic = "mplus", missing="ML")
summary(fit_cfa_agree_ideal, fit.measures = TRUE)
lavaan 0.6.15 ended normally after 23 iterations

  Estimator                                         ML
  Optimization method                           NLMINB
  Number of model parameters                        36

                                                  Used       Total
  Number of observations                           174         175
  Number of missing patterns                         1            

Model Test User Model:
                                                      
  Test statistic                                81.870
  Degrees of freedom                                54
  P-value (Chi-square)                           0.009

Model Test Baseline Model:

  Test statistic                               369.885
  Degrees of freedom                                66
  P-value                                        0.000

User Model versus Baseline Model:

  Comparative Fit Index (CFI)                    0.908
  Tucker-Lewis Index (TLI)                       0.888
                                                      
  Robust Comparative Fit Index (CFI)             0.908
  Robust Tucker-Lewis Index (TLI)                0.888

Loglikelihood and Information Criteria:

  Loglikelihood user model (H0)              -2727.336
  Loglikelihood unrestricted model (H1)      -2686.401
                                                      
  Akaike (AIC)                                5526.672
  Bayesian (BIC)                              5640.398
  Sample-size adjusted Bayesian (SABIC)       5526.400

Root Mean Square Error of Approximation:

  RMSEA                                          0.054
  90 Percent confidence interval - lower         0.028
  90 Percent confidence interval - upper         0.077
  P-value H_0: RMSEA <= 0.050                    0.359
  P-value H_0: RMSEA >= 0.080                    0.032
                                                      
  Robust RMSEA                                   0.054
  90 Percent confidence interval - lower         0.028
  90 Percent confidence interval - upper         0.077
  P-value H_0: Robust RMSEA <= 0.050             0.359
  P-value H_0: Robust RMSEA >= 0.080             0.032

Standardized Root Mean Square Residual:

  SRMR                                           0.054

Parameter Estimates:

  Standard errors                             Standard
  Information                                 Observed
  Observed information based on                Hessian

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)
  agree_ideal1 =~                                     
    b06_02_ (lmb1)    0.474    0.070    6.823    0.000
    b06_07_ (lmb2)    0.281    0.044    6.360    0.000
    b06_12_ (lmb3)   -0.536    0.082   -6.496    0.000
    b06_17_ (lmb4)   -0.129    0.135   -0.951    0.341
    b06_22_ (lmb5)   -0.450    0.080   -5.626    0.000
    b06_27_ (lmb6)    0.620    0.085    7.340    0.000
    b06_32_ (lmb7)    0.366    0.079    4.602    0.000
    b06_37_ (lmb8)   -0.504    0.077   -6.580    0.000
    b06_42_ (lmb9)   -0.417    0.094   -4.445    0.000
    b06_47_ (lm10)   -0.510    0.083   -6.158    0.000
    b06_52_ (lm11)    0.335    0.054    6.239    0.000
    b06_57_ (lm12)    0.574    0.076    7.587    0.000

Intercepts:
                   Estimate  Std.Err  z-value  P(>|z|)
   .bf06_02_1 (i1)    4.299    0.067   64.507    0.000
   .bf06_07_1         4.724    0.042  113.168    0.000
   .bf06_12_1         1.983    0.079   25.232    0.000
   .bf06_17_1         2.678    0.120   22.318    0.000
   .bf06_22_1         1.707    0.075   22.775    0.000
   .bf06_27_1         3.862    0.082   47.201    0.000
   .bf06_32_1         4.270    0.073   58.321    0.000
   .bf06_37_1         1.718    0.073   23.550    0.000
   .bf06_42_1         2.690    0.085   31.464    0.000
   .bf06_47_1         1.810    0.078   23.175    0.000
   .bf06_52_1         4.557    0.050   90.415    0.000
   .bf06_57_1         3.730    0.074   50.631    0.000
    agree_dl1         0.000                           

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)
   .bf06_02_t1        0.548    0.066    8.271    0.000
   .bf06_07_t1        0.224    0.027    8.380    0.000
   .bf06_12_t1        0.787    0.094    8.395    0.000
   .bf06_17_t1        2.489    0.267    9.311    0.000
   .bf06_22_t1        0.774    0.090    8.652    0.000
   .bf06_27_t1        0.780    0.097    8.050    0.000
   .bf06_32_t1        0.799    0.090    8.904    0.000
   .bf06_37_t1        0.673    0.081    8.350    0.000
   .bf06_42_t1        1.098    0.123    8.896    0.000
   .bf06_47_t1        0.802    0.095    8.473    0.000
   .bf06_52_t1        0.330    0.039    8.375    0.000
   .bf06_57_t1        0.615    0.077    7.944    0.000
    agree_ideal1      1.000                           
tidy(fit_cfa_agree_ideal) %>% filter(str_detect(label, "lambda")) %>% mutate(abs_loading = abs(estimate)) %>% arrange(desc(abs_loading)) %>% select(1:5, std.all)
# A tibble: 12 × 6
   term                       op    label    estimate std.error std.all
   <chr>                      <chr> <chr>       <dbl>     <dbl>   <dbl>
 1 agree_ideal1 =~ bf06_27_t1 =~    lambda6     0.620    0.0845  0.575 
 2 agree_ideal1 =~ bf06_57_t1 =~    lambda12    0.574    0.0757  0.591 
 3 agree_ideal1 =~ bf06_12_t1 =~    lambda3    -0.536    0.0825 -0.517 
 4 agree_ideal1 =~ bf06_47_t1 =~    lambda10   -0.510    0.0828 -0.495 
 5 agree_ideal1 =~ bf06_37_t1 =~    lambda8    -0.504    0.0766 -0.523 
 6 agree_ideal1 =~ bf06_02_t1 =~    lambda1     0.474    0.0695  0.540 
 7 agree_ideal1 =~ bf06_22_t1 =~    lambda5    -0.450    0.0801 -0.456 
 8 agree_ideal1 =~ bf06_42_t1 =~    lambda9    -0.417    0.0937 -0.370 
 9 agree_ideal1 =~ bf06_32_t1 =~    lambda7     0.366    0.0795  0.379 
10 agree_ideal1 =~ bf06_52_t1 =~    lambda11    0.335    0.0537  0.504 
11 agree_ideal1 =~ bf06_07_t1 =~    lambda2     0.281    0.0442  0.510 
12 agree_ideal1 =~ bf06_17_t1 =~    lambda4    -0.129    0.135  -0.0812

Reverse-code and form parcels:

df_sbsa3_wide_pers <- df_sbsa3_wide_pers %>% 
  mutate(bf06_47_t1_r = bf06_47_t1,
         bf06_37_t1_r = bf06_37_t1,
         bf06_22_t1_r = bf06_22_t1,
         bf06_12_t1_r = bf06_12_t1,
         bf06_42_t1_r = bf06_42_t1,
         bf06_17_t1_r = bf06_17_t1,
         bf06_47_t2_r = bf06_47_t2,
         bf06_37_t2_r = bf06_37_t2,
         bf06_22_t2_r = bf06_22_t2,
         bf06_12_t2_r = bf06_12_t2,
         bf06_42_t2_r = bf06_42_t2,
         bf06_17_t2_r = bf06_17_t2) %>% 
  mutate(across(c(bf06_47_t1_r, bf06_37_t1_r, bf06_22_t1_r, bf06_12_t1_r, bf06_42_t1_r, bf06_17_t1_r, 
                  bf06_47_t2_r, bf06_37_t2_r, bf06_22_t2_r, bf06_12_t2_r, bf06_42_t2_r, bf06_17_t2_r), 
                ~ recode(.x, `1` = 5L, `2` = 4L, `3` = 3L, `4` = 2L, `5` = 1L, .default = NA_integer_))) %>% 
  mutate(agree_ideal_par1_t1 = rowMeans(across(c(bf06_27_t1, bf06_02_t1, bf06_32_t1, bf06_17_t1_r)), na.rm=T),
         agree_ideal_par2_t1 = rowMeans(across(c(bf06_57_t1, bf06_37_t1_r, bf06_42_t1_r, bf06_07_t1)), na.rm=T),
         agree_ideal_par3_t1 = rowMeans(across(c(bf06_12_t1_r, bf06_47_t1_r, bf06_22_t1_r, bf06_52_t1)), na.rm=T),
         agree_ideal_par1_t2 = rowMeans(across(c(bf06_27_t2, bf06_02_t2, bf06_32_t2, bf06_17_t2_r)), na.rm=T),
         agree_ideal_par2_t2 = rowMeans(across(c(bf06_57_t2, bf06_37_t2_r, bf06_42_t2_r, bf06_07_t2)), na.rm=T),
         agree_ideal_par3_t2 = rowMeans(across(c(bf06_12_t2_r, bf06_47_t2_r, bf06_22_t2_r, bf06_52_t2)), na.rm=T))

3.2.5 Conscientiousness - current personality

Check CFA item loadings at T1

cfa_consc_curr <- '
# Define the latent factors
consc_curr1 =~ NA*bf05_03_t1 + lambda1*bf05_03_t1 + lambda2*bf05_08_t1 + lambda3*bf05_13_t1 + lambda4*bf05_18_t1 + lambda5*bf05_23_t1 + lambda6*bf05_28_t1 + lambda7*bf05_33_t1 + lambda8*bf05_38_t1 + lambda9*bf05_43_t1 + lambda10*bf05_48_t1 + lambda11*bf05_53_t1 + lambda12*bf05_58_t1
# Intercepts
bf05_03_t1 ~ i1*1
bf05_08_t1 ~ 1
bf05_13_t1 ~ 1
bf05_18_t1 ~ 1
bf05_23_t1 ~ 1
bf05_28_t1 ~ 1
bf05_33_t1 ~ 1
bf05_38_t1 ~ 1
bf05_43_t1 ~ 1
bf05_48_t1 ~ 1
bf05_53_t1 ~ 1
bf05_58_t1 ~ 1

# Unique Variances
bf05_03_t1 ~~ bf05_03_t1
bf05_08_t1 ~~ bf05_08_t1
bf05_13_t1 ~~ bf05_13_t1
bf05_18_t1 ~~ bf05_18_t1
bf05_23_t1 ~~ bf05_23_t1
bf05_28_t1 ~~ bf05_28_t1
bf05_33_t1 ~~ bf05_33_t1
bf05_38_t1 ~~ bf05_38_t1
bf05_43_t1 ~~ bf05_43_t1
bf05_48_t1 ~~ bf05_48_t1
bf05_53_t1 ~~ bf05_53_t1
bf05_58_t1 ~~ bf05_58_t1

# Latent Variable Means
consc_curr1 ~ 0*1

# Latent Variable Variances and Covariance
consc_curr1 ~~ 1*consc_curr1
'
fit_cfa_consc_curr <- cfa(cfa_consc_curr, data = df_sbsa3_wide_pers, mimic = "mplus", missing="ML")
summary(fit_cfa_consc_curr, fit.measures = TRUE)
lavaan 0.6.15 ended normally after 16 iterations

  Estimator                                         ML
  Optimization method                           NLMINB
  Number of model parameters                        36

                                                  Used       Total
  Number of observations                           529         530
  Number of missing patterns                         1            

Model Test User Model:
                                                      
  Test statistic                               473.373
  Degrees of freedom                                54
  P-value (Chi-square)                           0.000

Model Test Baseline Model:

  Test statistic                              2427.188
  Degrees of freedom                                66
  P-value                                        0.000

User Model versus Baseline Model:

  Comparative Fit Index (CFI)                    0.822
  Tucker-Lewis Index (TLI)                       0.783
                                                      
  Robust Comparative Fit Index (CFI)             0.822
  Robust Tucker-Lewis Index (TLI)                0.783

Loglikelihood and Information Criteria:

  Loglikelihood user model (H0)              -8913.589
  Loglikelihood unrestricted model (H1)      -8676.903
                                                      
  Akaike (AIC)                               17899.178
  Bayesian (BIC)                             18052.934
  Sample-size adjusted Bayesian (SABIC)      17938.660

Root Mean Square Error of Approximation:

  RMSEA                                          0.121
  90 Percent confidence interval - lower         0.111
  90 Percent confidence interval - upper         0.131
  P-value H_0: RMSEA <= 0.050                    0.000
  P-value H_0: RMSEA >= 0.080                    1.000
                                                      
  Robust RMSEA                                   0.121
  90 Percent confidence interval - lower         0.111
  90 Percent confidence interval - upper         0.131
  P-value H_0: Robust RMSEA <= 0.050             0.000
  P-value H_0: Robust RMSEA >= 0.080             1.000

Standardized Root Mean Square Residual:

  SRMR                                           0.067

Parameter Estimates:

  Standard errors                             Standard
  Information                                 Observed
  Observed information based on                Hessian

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)
  consc_curr1 =~                                      
    b05_03_ (lmb1)    1.035    0.052   19.907    0.000
    b05_08_ (lmb2)    0.730    0.051   14.218    0.000
    b05_13_ (lmb3)   -0.367    0.046   -7.969    0.000
    b05_18_ (lmb4)   -0.814    0.048  -17.100    0.000
    b05_23_ (lmb5)    0.779    0.054   14.445    0.000
    b05_28_ (lmb6)    0.670    0.050   13.471    0.000
    b05_33_ (lmb7)   -0.923    0.047  -19.565    0.000
    b05_38_ (lmb8)   -0.631    0.041  -15.256    0.000
    b05_43_ (lmb9)   -0.421    0.039  -10.775    0.000
    b05_48_ (lm10)    0.781    0.048   16.299    0.000
    b05_53_ (lm11)   -0.661    0.045  -14.551    0.000
    b05_58_ (lm12)    0.641    0.052   12.300    0.000

Intercepts:
                   Estimate  Std.Err  z-value  P(>|z|)
   .bf05_03_1 (i1)    2.896    0.059   49.361    0.000
   .bf05_08_1         3.465    0.054   64.742    0.000
   .bf05_13_1         3.728    0.045   83.125    0.000
   .bf05_18_1         3.614    0.052   70.129    0.000
   .bf05_23_1         3.331    0.056   59.159    0.000
   .bf05_28_1         2.907    0.051   56.674    0.000
   .bf05_33_1         3.416    0.053   64.433    0.000
   .bf05_38_1         3.722    0.044   85.516    0.000
   .bf05_43_1         4.070    0.039  104.755    0.000
   .bf05_48_1         2.087    0.051   40.584    0.000
   .bf05_53_1         3.784    0.047   80.181    0.000
   .bf05_58_1         2.699    0.053   50.831    0.000
    cnsc_crr1         0.000                           

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)
   .bf05_03_t1        0.750    0.057   13.059    0.000
   .bf05_08_t1        0.982    0.065   15.089    0.000
   .bf05_13_t1        0.929    0.058   15.950    0.000
   .bf05_18_t1        0.743    0.052   14.274    0.000
   .bf05_23_t1        1.070    0.071   15.003    0.000
   .bf05_28_t1        0.944    0.062   15.225    0.000
   .bf05_33_t1        0.634    0.048   13.277    0.000
   .bf05_38_t1        0.604    0.041   14.697    0.000
   .bf05_43_t1        0.622    0.040   15.596    0.000
   .bf05_48_t1        0.789    0.054   14.615    0.000
   .bf05_53_t1        0.742    0.050   14.806    0.000
   .bf05_58_t1        1.081    0.070   15.457    0.000
    consc_curr1       1.000                           
tidy(fit_cfa_consc_curr) %>% filter(str_detect(label, "lambda")) %>% mutate(abs_loading = abs(estimate)) %>% arrange(desc(abs_loading)) %>% select(1:5, std.all)
# A tibble: 12 × 6
   term                      op    label    estimate std.error std.all
   <chr>                     <chr> <chr>       <dbl>     <dbl>   <dbl>
 1 consc_curr1 =~ bf05_03_t1 =~    lambda1     1.03     0.0520   0.767
 2 consc_curr1 =~ bf05_33_t1 =~    lambda7    -0.923    0.0472  -0.757
 3 consc_curr1 =~ bf05_18_t1 =~    lambda4    -0.814    0.0476  -0.687
 4 consc_curr1 =~ bf05_48_t1 =~    lambda10    0.781    0.0479   0.660
 5 consc_curr1 =~ bf05_23_t1 =~    lambda5     0.779    0.0539   0.602
 6 consc_curr1 =~ bf05_08_t1 =~    lambda2     0.730    0.0514   0.593
 7 consc_curr1 =~ bf05_28_t1 =~    lambda6     0.670    0.0497   0.568
 8 consc_curr1 =~ bf05_53_t1 =~    lambda11   -0.661    0.0454  -0.609
 9 consc_curr1 =~ bf05_58_t1 =~    lambda12    0.641    0.0521   0.525
10 consc_curr1 =~ bf05_38_t1 =~    lambda8    -0.631    0.0414  -0.631
11 consc_curr1 =~ bf05_43_t1 =~    lambda9    -0.421    0.0390  -0.471
12 consc_curr1 =~ bf05_13_t1 =~    lambda3    -0.367    0.0461  -0.356

Reverse-code and form parcels:

df_sbsa3_wide_pers <- df_sbsa3_wide_pers %>% 
  mutate(bf05_03_t1_r = bf05_03_t1,
         bf05_23_t1_r = bf05_23_t1,
         bf05_48_t1_r = bf05_48_t1,
         bf05_08_t1_r = bf05_08_t1,
         bf05_28_t1_r = bf05_28_t1,
         bf05_58_t1_r = bf05_58_t1,
         bf05_03_t2_r = bf05_03_t2,
         bf05_23_t2_r = bf05_23_t2,
         bf05_48_t2_r = bf05_48_t2,
         bf05_08_t2_r = bf05_08_t2,
         bf05_28_t2_r = bf05_28_t2,
         bf05_58_t2_r = bf05_58_t2) %>% 
  mutate(across(c(bf05_03_t1_r, bf05_23_t1_r, bf05_48_t1_r, bf05_08_t1_r, bf05_28_t1_r, bf05_58_t1_r,
                  bf05_03_t2_r, bf05_23_t2_r, bf05_48_t2_r, bf05_08_t2_r, bf05_28_t2_r, bf05_58_t2_r), 
                ~ recode(.x, `1` = 5L, `2` = 4L, `3` = 3L, `4` = 2L, `5` = 1L, .default = NA_integer_))) %>% 
  mutate(consc_curr_par1_t1 = rowMeans(across(c(bf05_03_t1_r, bf05_08_t1_r, bf05_58_t1_r, bf05_13_t1)), na.rm=T),
         consc_curr_par2_t1 = rowMeans(across(c(bf05_33_t1, bf05_23_t1_r, bf05_53_t1, bf05_43_t1)), na.rm=T),
         consc_curr_par3_t1 = rowMeans(across(c(bf05_18_t1, bf05_48_t1_r, bf05_28_t1_r, bf05_38_t1)), na.rm=T),
         consc_curr_par1_t2 = rowMeans(across(c(bf05_03_t2_r, bf05_08_t2_r, bf05_58_t2_r, bf05_13_t2)), na.rm=T),
         consc_curr_par2_t2 = rowMeans(across(c(bf05_33_t2, bf05_23_t2_r, bf05_53_t2, bf05_43_t2)), na.rm=T),
         consc_curr_par3_t2 = rowMeans(across(c(bf05_18_t2, bf05_48_t2_r, bf05_28_t2_r, bf05_38_t2)), na.rm=T))

3.2.6 Conscientiousness - ideal personality

Check CFA item loadings at T1

cfa_consc_ideal <- '
# Define the latent factors
consc_ideal1 =~ NA*bf06_03_t1 + lambda1*bf06_03_t1 + lambda2*bf06_08_t1 + lambda3*bf06_13_t1 + lambda4*bf06_18_t1 + lambda5*bf06_23_t1 + lambda6*bf06_28_t1 + lambda7*bf06_33_t1 + lambda8*bf06_38_t1 + lambda9*bf06_43_t1 + lambda10*bf06_48_t1 + lambda11*bf06_53_t1 + lambda12*bf06_58_t1
# Intercepts
bf06_03_t1 ~ i1*1
bf06_08_t1 ~ 1
bf06_13_t1 ~ 1
bf06_18_t1 ~ 1
bf06_23_t1 ~ 1
bf06_28_t1 ~ 1
bf06_33_t1 ~ 1
bf06_38_t1 ~ 1
bf06_43_t1 ~ 1
bf06_48_t1 ~ 1
bf06_53_t1 ~ 1
bf06_58_t1 ~ 1

# Unique Variances
bf06_03_t1 ~~ bf06_03_t1
bf06_08_t1 ~~ bf06_08_t1
bf06_13_t1 ~~ bf06_13_t1
bf06_18_t1 ~~ bf06_18_t1
bf06_23_t1 ~~ bf06_23_t1
bf06_28_t1 ~~ bf06_28_t1
bf06_33_t1 ~~ bf06_33_t1
bf06_38_t1 ~~ bf06_38_t1
bf06_43_t1 ~~ bf06_43_t1
bf06_48_t1 ~~ bf06_48_t1
bf06_53_t1 ~~ bf06_53_t1
bf06_58_t1 ~~ bf06_58_t1

# Latent Variable Means
consc_ideal1 ~ 0*1

# Latent Variable Variances and Covariance
consc_ideal1 ~~ 1*consc_ideal1
'
fit_cfa_consc_ideal <- cfa(cfa_consc_ideal, data = df_sbsa3_wide_pers %>% filter(group=="Group 1"), 
                           mimic = "mplus", missing="ML")
summary(fit_cfa_consc_ideal, fit.measures = TRUE)
lavaan 0.6.15 ended normally after 38 iterations

  Estimator                                         ML
  Optimization method                           NLMINB
  Number of model parameters                        36

                                                  Used       Total
  Number of observations                           174         175
  Number of missing patterns                         2            

Model Test User Model:
                                                      
  Test statistic                                85.313
  Degrees of freedom                                54
  P-value (Chi-square)                           0.004

Model Test Baseline Model:

  Test statistic                               403.053
  Degrees of freedom                                66
  P-value                                        0.000

User Model versus Baseline Model:

  Comparative Fit Index (CFI)                    0.907
  Tucker-Lewis Index (TLI)                       0.886
                                                      
  Robust Comparative Fit Index (CFI)             0.907
  Robust Tucker-Lewis Index (TLI)                0.887

Loglikelihood and Information Criteria:

  Loglikelihood user model (H0)              -2492.025
  Loglikelihood unrestricted model (H1)      -2449.369
                                                      
  Akaike (AIC)                                5056.051
  Bayesian (BIC)                              5169.777
  Sample-size adjusted Bayesian (SABIC)       5055.778

Root Mean Square Error of Approximation:

  RMSEA                                          0.058
  90 Percent confidence interval - lower         0.033
  90 Percent confidence interval - upper         0.080
  P-value H_0: RMSEA <= 0.050                    0.277
  P-value H_0: RMSEA >= 0.080                    0.052
                                                      
  Robust RMSEA                                   0.058
  90 Percent confidence interval - lower         0.033
  90 Percent confidence interval - upper         0.080
  P-value H_0: Robust RMSEA <= 0.050             0.278
  P-value H_0: Robust RMSEA >= 0.080             0.052

Standardized Root Mean Square Residual:

  SRMR                                           0.059

Parameter Estimates:

  Standard errors                             Standard
  Information                                 Observed
  Observed information based on                Hessian

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)
  consc_ideal1 =~                                     
    b06_03_ (lmb1)    0.422    0.094    4.477    0.000
    b06_08_ (lmb2)    0.415    0.071    5.875    0.000
    b06_13_ (lmb3)   -0.188    0.092   -2.049    0.040
    b06_18_ (lmb4)   -0.337    0.066   -5.103    0.000
    b06_23_ (lmb5)    0.521    0.071    7.346    0.000
    b06_28_ (lmb6)    0.423    0.086    4.944    0.000
    b06_33_ (lmb7)   -0.358    0.061   -5.859    0.000
    b06_38_ (lmb8)   -0.378    0.046   -8.194    0.000
    b06_43_ (lmb9)   -0.240    0.064   -3.735    0.000
    b06_48_ (lm10)    0.525    0.056    9.382    0.000
    b06_53_ (lm11)   -0.508    0.054   -9.483    0.000
    b06_58_ (lm12)    0.409    0.074    5.561    0.000

Intercepts:
                   Estimate  Std.Err  z-value  P(>|z|)
   .bf06_03_1 (i1)    1.759    0.088   20.010    0.000
   .bf06_08_1         1.638    0.068   24.244    0.000
   .bf06_13_1         4.259    0.083   51.458    0.000
   .bf06_18_1         4.379    0.062   70.457    0.000
   .bf06_23_1         1.540    0.070   22.088    0.000
   .bf06_28_1         1.977    0.080   24.704    0.000
   .bf06_33_1         4.425    0.058   76.044    0.000
   .bf06_38_1         4.667    0.046  100.975    0.000
   .bf06_43_1         4.500    0.059   76.232    0.000
   .bf06_48_1         1.377    0.058   23.799    0.000
   .bf06_53_1         4.546    0.055   81.965    0.000
   .bf06_58_1         1.747    0.070   25.084    0.000
    consc_dl1         0.000                           

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)
   .bf06_03_t1        1.166    0.130    8.995    0.000
   .bf06_08_t1        0.622    0.071    8.727    0.000
   .bf06_13_t1        1.157    0.125    9.259    0.000
   .bf06_18_t1        0.558    0.063    8.868    0.000
   .bf06_23_t1        0.575    0.069    8.284    0.000
   .bf06_28_t1        0.936    0.105    8.887    0.000
   .bf06_33_t1        0.461    0.053    8.708    0.000
   .bf06_38_t1        0.229    0.029    7.947    0.000
   .bf06_43_t1        0.549    0.060    9.091    0.000
   .bf06_48_t1        0.305    0.042    7.334    0.000
   .bf06_53_t1        0.277    0.038    7.300    0.000
   .bf06_58_t1        0.676    0.077    8.759    0.000
    consc_ideal1      1.000                           
tidy(fit_cfa_consc_ideal) %>% filter(str_detect(label, "lambda")) %>% mutate(abs_loading = abs(estimate)) %>% arrange(desc(abs_loading)) %>% select(1:5, std.all)
# A tibble: 12 × 6
   term                       op    label    estimate std.error std.all
   <chr>                      <chr> <chr>       <dbl>     <dbl>   <dbl>
 1 consc_ideal1 =~ bf06_48_t1 =~    lambda10    0.525    0.0560   0.689
 2 consc_ideal1 =~ bf06_23_t1 =~    lambda5     0.521    0.0709   0.566
 3 consc_ideal1 =~ bf06_53_t1 =~    lambda11   -0.508    0.0536  -0.695
 4 consc_ideal1 =~ bf06_28_t1 =~    lambda6     0.423    0.0856   0.401
 5 consc_ideal1 =~ bf06_03_t1 =~    lambda1     0.422    0.0942   0.364
 6 consc_ideal1 =~ bf06_08_t1 =~    lambda2     0.415    0.0706   0.466
 7 consc_ideal1 =~ bf06_58_t1 =~    lambda12    0.409    0.0736   0.446
 8 consc_ideal1 =~ bf06_38_t1 =~    lambda8    -0.378    0.0461  -0.620
 9 consc_ideal1 =~ bf06_33_t1 =~    lambda7    -0.358    0.0610  -0.466
10 consc_ideal1 =~ bf06_18_t1 =~    lambda4    -0.337    0.0661  -0.412
11 consc_ideal1 =~ bf06_43_t1 =~    lambda9    -0.240    0.0641  -0.308
12 consc_ideal1 =~ bf06_13_t1 =~    lambda3    -0.188    0.0915  -0.172

Reverse-code and form parcels:

df_sbsa3_wide_pers <- df_sbsa3_wide_pers %>% 
  mutate(bf06_03_t1_r = bf06_03_t1,
         bf06_23_t1_r = bf06_23_t1,
         bf06_48_t1_r = bf06_48_t1,
         bf06_08_t1_r = bf06_08_t1,
         bf06_28_t1_r = bf06_28_t1,
         bf06_58_t1_r = bf06_58_t1,
         bf06_03_t2_r = bf06_03_t2,
         bf06_23_t2_r = bf06_23_t2,
         bf06_48_t2_r = bf06_48_t2,
         bf06_08_t2_r = bf06_08_t2,
         bf06_28_t2_r = bf06_28_t2,
         bf06_58_t2_r = bf06_58_t2) %>% 
  mutate(across(c(bf06_03_t1_r, bf06_23_t1_r, bf06_48_t1_r, bf06_08_t1_r, bf06_28_t1_r, bf06_58_t1_r,
                  bf06_03_t2_r, bf06_23_t2_r, bf06_48_t2_r, bf06_08_t2_r, bf06_28_t2_r, bf06_58_t2_r), 
                ~ recode(.x, `1` = 5L, `2` = 4L, `3` = 3L, `4` = 2L, `5` = 1L, .default = NA_integer_))) %>% 
  mutate(consc_ideal_par1_t1 = rowMeans(across(c(bf06_48_t1_r, bf06_08_t1_r, bf06_33_t1, bf06_13_t1)), na.rm=T),
         consc_ideal_par2_t1 = rowMeans(across(c(bf06_23_t1_r, bf06_03_t1_r, bf06_38_t1, bf06_43_t1)), na.rm=T),
         consc_ideal_par3_t1 = rowMeans(across(c(bf06_53_t1, bf06_28_t1_r, bf06_58_t1_r, bf06_18_t1)), na.rm=T),
         consc_ideal_par1_t2 = rowMeans(across(c(bf06_48_t2_r, bf06_08_t2_r, bf06_33_t2, bf06_13_t2)), na.rm=T),
         consc_ideal_par2_t2 = rowMeans(across(c(bf06_23_t2_r, bf06_03_t2_r, bf06_38_t2, bf06_43_t2)), na.rm=T),
         consc_ideal_par3_t2 = rowMeans(across(c(bf06_53_t2, bf06_28_t2_r, bf06_58_t2_r, bf06_18_t2)), na.rm=T))

3.2.7 Neuroticism - current personality

Check CFA item loadings at T1

cfa_neuro_curr <- '
#| warning: false

# Define the latent factors
neuro_curr1 =~ NA*bf05_04_t1 + lambda1*bf05_04_t1 + lambda2*bf05_09_t1 + lambda3*bf05_14_t1 + lambda4*bf05_19_t1 + lambda5*bf05_24_t1 + lambda6*bf05_29_t1 + lambda7*bf05_34_t1 + lambda8*bf05_39_t1 + lambda9*bf05_44_t1 + lambda10*bf05_49_t1 + lambda11*bf05_54_t1 + lambda12*bf05_59_t1

# Intercepts
bf05_04_t1 ~ i1*1
bf05_09_t1 ~ 1
bf05_14_t1 ~ 1
bf05_19_t1 ~ 1
bf05_24_t1 ~ 1
bf05_29_t1 ~ 1
bf05_34_t1 ~ 1
bf05_39_t1 ~ 1
bf05_44_t1 ~ 1
bf05_49_t1 ~ 1
bf05_54_t1 ~ 1
bf05_59_t1 ~ 1

# Unique Variances
bf05_04_t1 ~~ bf05_04_t1
bf05_09_t1 ~~ bf05_09_t1
bf05_14_t1 ~~ bf05_14_t1
bf05_19_t1 ~~ bf05_19_t1
bf05_24_t1 ~~ bf05_24_t1
bf05_29_t1 ~~ bf05_29_t1
bf05_34_t1 ~~ bf05_34_t1
bf05_39_t1 ~~ bf05_39_t1
bf05_44_t1 ~~ bf05_44_t1
bf05_49_t1 ~~ bf05_49_t1
bf05_54_t1 ~~ bf05_54_t1
bf05_59_t1 ~~ bf05_59_t1

# Latent Variable Means
neuro_curr1 ~ 0*1

# Latent Variable Variances and Covariance
neuro_curr1 ~~ 1*neuro_curr1
'
fit_cfa_neuro_curr <- cfa(cfa_neuro_curr, data = df_sbsa3_wide_pers, mimic = "mplus", missing="ML")
Warning in lav_data_full(data = data, group = group, cluster = cluster, : lavaan WARNING: some cases are empty and will be ignored:
  191
summary(fit_cfa_neuro_curr, fit.measures = TRUE)
lavaan 0.6.15 ended normally after 14 iterations

  Estimator                                         ML
  Optimization method                           NLMINB
  Number of model parameters                        36

                                                  Used       Total
  Number of observations                           529         530
  Number of missing patterns                         1            

Model Test User Model:
                                                      
  Test statistic                               468.967
  Degrees of freedom                                54
  P-value (Chi-square)                           0.000

Model Test Baseline Model:

  Test statistic                              3276.139
  Degrees of freedom                                66
  P-value                                        0.000

User Model versus Baseline Model:

  Comparative Fit Index (CFI)                    0.871
  Tucker-Lewis Index (TLI)                       0.842
                                                      
  Robust Comparative Fit Index (CFI)             0.871
  Robust Tucker-Lewis Index (TLI)                0.842

Loglikelihood and Information Criteria:

  Loglikelihood user model (H0)              -8968.375
  Loglikelihood unrestricted model (H1)      -8733.891
                                                      
  Akaike (AIC)                               18008.749
  Bayesian (BIC)                             18162.505
  Sample-size adjusted Bayesian (SABIC)      18048.231

Root Mean Square Error of Approximation:

  RMSEA                                          0.121
  90 Percent confidence interval - lower         0.111
  90 Percent confidence interval - upper         0.131
  P-value H_0: RMSEA <= 0.050                    0.000
  P-value H_0: RMSEA >= 0.080                    1.000
                                                      
  Robust RMSEA                                   0.121
  90 Percent confidence interval - lower         0.111
  90 Percent confidence interval - upper         0.131
  P-value H_0: Robust RMSEA <= 0.050             0.000
  P-value H_0: Robust RMSEA >= 0.080             1.000

Standardized Root Mean Square Residual:

  SRMR                                           0.052

Parameter Estimates:

  Standard errors                             Standard
  Information                                 Observed
  Observed information based on                Hessian

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)
  neuro_curr1 =~                                      
    b05_04_ (lmb1)    0.924    0.049   18.742    0.000
    b05_09_ (lmb2)    0.781    0.050   15.715    0.000
    b05_14_ (lmb3)   -0.892    0.051  -17.320    0.000
    b05_19_ (lmb4)   -0.619    0.045  -13.822    0.000
    b05_24_ (lmb5)    0.737    0.051   14.464    0.000
    b05_29_ (lmb6)    1.006    0.047   21.320    0.000
    b05_34_ (lmb7)   -0.895    0.047  -18.926    0.000
    b05_39_ (lmb8)   -0.950    0.048  -19.949    0.000
    b05_44_ (lmb9)    0.784    0.045   17.367    0.000
    b05_49_ (lm10)    0.726    0.052   13.877    0.000
    b05_54_ (lm11)   -0.992    0.050  -19.931    0.000
    b05_59_ (lm12)   -0.874    0.051  -17.150    0.000

Intercepts:
                   Estimate  Std.Err  z-value  P(>|z|)
   .bf05_04_1 (i1)    2.820    0.055   50.879    0.000
   .bf05_09_1         2.994    0.054   55.829    0.000
   .bf05_14_1         3.121    0.057   54.964    0.000
   .bf05_19_1         3.550    0.047   75.245    0.000
   .bf05_24_1         3.257    0.054   60.205    0.000
   .bf05_29_1         3.057    0.055   55.388    0.000
   .bf05_34_1         3.732    0.053   69.884    0.000
   .bf05_39_1         3.234    0.054   59.500    0.000
   .bf05_44_1         3.355    0.050   67.515    0.000
   .bf05_49_1         2.397    0.055   43.476    0.000
   .bf05_54_1         3.144    0.057   55.339    0.000
   .bf05_59_1         3.040    0.056   54.262    0.000
    neur_crr1         0.000                           

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)
   .bf05_04_t1        0.771    0.053   14.533    0.000
   .bf05_09_t1        0.911    0.060   15.269    0.000
   .bf05_14_t1        0.911    0.061   14.973    0.000
   .bf05_19_t1        0.794    0.051   15.559    0.000
   .bf05_24_t1        1.005    0.065   15.463    0.000
   .bf05_29_t1        0.599    0.044   13.666    0.000
   .bf05_34_t1        0.707    0.048   14.569    0.000
   .bf05_39_t1        0.661    0.047   13.953    0.000
   .bf05_44_t1        0.692    0.047   14.854    0.000
   .bf05_49_t1        1.080    0.070   15.544    0.000
   .bf05_54_t1        0.724    0.052   13.981    0.000
   .bf05_59_t1        0.896    0.060   14.968    0.000
    neuro_curr1       1.000                           
tidy(fit_cfa_neuro_curr) %>% filter(str_detect(label, "lambda")) %>% mutate(abs_loading = abs(estimate)) %>% arrange(desc(abs_loading)) %>% select(1:5, std.all)
# A tibble: 12 × 6
   term                      op    label    estimate std.error std.all
   <chr>                     <chr> <chr>       <dbl>     <dbl>   <dbl>
 1 neuro_curr1 =~ bf05_29_t1 =~    lambda6     1.01     0.0472   0.793
 2 neuro_curr1 =~ bf05_54_t1 =~    lambda11   -0.992    0.0498  -0.759
 3 neuro_curr1 =~ bf05_39_t1 =~    lambda8    -0.950    0.0476  -0.760
 4 neuro_curr1 =~ bf05_04_t1 =~    lambda1     0.924    0.0493   0.725
 5 neuro_curr1 =~ bf05_34_t1 =~    lambda7    -0.895    0.0473  -0.729
 6 neuro_curr1 =~ bf05_14_t1 =~    lambda3    -0.892    0.0515  -0.683
 7 neuro_curr1 =~ bf05_59_t1 =~    lambda12   -0.874    0.0510  -0.678
 8 neuro_curr1 =~ bf05_44_t1 =~    lambda9     0.784    0.0451   0.686
 9 neuro_curr1 =~ bf05_09_t1 =~    lambda2     0.781    0.0497   0.633
10 neuro_curr1 =~ bf05_24_t1 =~    lambda5     0.737    0.0510   0.593
11 neuro_curr1 =~ bf05_49_t1 =~    lambda10    0.726    0.0523   0.573
12 neuro_curr1 =~ bf05_19_t1 =~    lambda4    -0.619    0.0448  -0.571

Reverse-code and form parcels:

df_sbsa3_wide_pers <- df_sbsa3_wide_pers %>% 
  mutate(bf05_29_t1_r = bf05_29_t1,
         bf05_04_t1_r = bf05_04_t1,
         bf05_24_t1_r = bf05_24_t1,
         bf05_44_t1_r = bf05_44_t1,
         bf05_09_t1_r = bf05_09_t1,
         bf05_49_t1_r = bf05_49_t1,
         bf05_29_t2_r = bf05_29_t2,
         bf05_04_t2_r = bf05_04_t2,
         bf05_24_t2_r = bf05_24_t2,
         bf05_44_t2_r = bf05_44_t2,
         bf05_09_t2_r = bf05_09_t2,
         bf05_49_t2_r = bf05_49_t2) %>% 
  mutate(across(c(bf05_29_t1_r, bf05_04_t1_r, bf05_24_t1_r, bf05_44_t1_r, bf05_09_t1_r, bf05_49_t1_r, 
                  bf05_29_t2_r, bf05_04_t2_r, bf05_24_t2_r, bf05_44_t2_r, bf05_09_t2_r, bf05_49_t2_r), 
                ~ recode(.x, `1` = 5L, `2` = 4L, `3` = 3L, `4` = 2L, `5` = 1L, .default = NA_integer_))) %>% 
  mutate(neuro_curr_par1_t1 = rowMeans(across(c(bf05_29_t1_r, bf05_14_t1, bf05_09_t1_r, bf05_19_t1)), na.rm=T),
         neuro_curr_par2_t1 = rowMeans(across(c(bf05_54_t1, bf05_34_t1, bf05_44_t1_r, bf05_49_t1_r)), na.rm=T),
         neuro_curr_par3_t1 = rowMeans(across(c(bf05_39_t1, bf05_04_t1_r, bf05_59_t1, bf05_24_t1_r)), na.rm=T),
         neuro_curr_par1_t2 = rowMeans(across(c(bf05_29_t2_r, bf05_14_t2, bf05_09_t2_r, bf05_19_t2)), na.rm=T),
         neuro_curr_par2_t2 = rowMeans(across(c(bf05_54_t2, bf05_34_t2, bf05_44_t2_r, bf05_49_t2_r)), na.rm=T),
         neuro_curr_par3_t2 = rowMeans(across(c(bf05_39_t2, bf05_04_t2_r, bf05_59_t2, bf05_24_t2_r)), na.rm=T))

3.2.8 Neuroticism - ideal personality

Check CFA item loadings at T1

cfa_neuro_ideal <- '
# Define the latent factors
neuro_ideal1 =~ NA*bf06_04_t1 + lambda1*bf06_04_t1 + lambda2*bf06_09_t1 + lambda3*bf06_14_t1 + lambda4*bf06_19_t1 + lambda5*bf06_24_t1 + lambda6*bf06_29_t1 + lambda7*bf06_34_t1 + lambda8*bf06_39_t1 + lambda9*bf06_44_t1 + lambda10*bf06_49_t1 + lambda11*bf06_54_t1 + lambda12*bf06_59_t1

# Intercepts
bf06_04_t1 ~ i1*1
bf06_09_t1 ~ 1
bf06_14_t1 ~ 1
bf06_19_t1 ~ 1
bf06_24_t1 ~ 1
bf06_29_t1 ~ 1
bf06_34_t1 ~ 1
bf06_39_t1 ~ 1
bf06_44_t1 ~ 1
bf06_49_t1 ~ 1
bf06_54_t1 ~ 1
bf06_59_t1 ~ 1

# Unique Variances
bf06_04_t1 ~~ bf06_04_t1
bf06_09_t1 ~~ bf06_09_t1
bf06_14_t1 ~~ bf06_14_t1
bf06_19_t1 ~~ bf06_19_t1
bf06_24_t1 ~~ bf06_24_t1
bf06_29_t1 ~~ bf06_29_t1
bf06_34_t1 ~~ bf06_34_t1
bf06_39_t1 ~~ bf06_39_t1
bf06_44_t1 ~~ bf06_44_t1
bf06_49_t1 ~~ bf06_49_t1
bf06_54_t1 ~~ bf06_54_t1
bf06_59_t1 ~~ bf06_59_t1

# Latent Variable Means
neuro_ideal1 ~ 0*1

# Latent Variable Variances and Covariance
neuro_ideal1 ~~ 1*neuro_ideal1
'
fit_cfa_neuro_ideal <- cfa(cfa_neuro_ideal, data = df_sbsa3_wide_pers %>% filter(group=="Group 1"), 
                           mimic = "mplus", missing="ML")
summary(fit_cfa_neuro_ideal, fit.measures = TRUE)
lavaan 0.6.15 ended normally after 20 iterations

  Estimator                                         ML
  Optimization method                           NLMINB
  Number of model parameters                        36

                                                  Used       Total
  Number of observations                           174         175
  Number of missing patterns                         1            

Model Test User Model:
                                                      
  Test statistic                               103.156
  Degrees of freedom                                54
  P-value (Chi-square)                           0.000

Model Test Baseline Model:

  Test statistic                               389.315
  Degrees of freedom                                66
  P-value                                        0.000

User Model versus Baseline Model:

  Comparative Fit Index (CFI)                    0.848
  Tucker-Lewis Index (TLI)                       0.814
                                                      
  Robust Comparative Fit Index (CFI)             0.848
  Robust Tucker-Lewis Index (TLI)                0.814

Loglikelihood and Information Criteria:

  Loglikelihood user model (H0)              -2571.036
  Loglikelihood unrestricted model (H1)      -2519.458
                                                      
  Akaike (AIC)                                5214.072
  Bayesian (BIC)                              5327.798
  Sample-size adjusted Bayesian (SABIC)       5213.800

Root Mean Square Error of Approximation:

  RMSEA                                          0.072
  90 Percent confidence interval - lower         0.051
  90 Percent confidence interval - upper         0.093
  P-value H_0: RMSEA <= 0.050                    0.044
  P-value H_0: RMSEA >= 0.080                    0.289
                                                      
  Robust RMSEA                                   0.072
  90 Percent confidence interval - lower         0.051
  90 Percent confidence interval - upper         0.093
  P-value H_0: Robust RMSEA <= 0.050             0.044
  P-value H_0: Robust RMSEA >= 0.080             0.289

Standardized Root Mean Square Residual:

  SRMR                                           0.062

Parameter Estimates:

  Standard errors                             Standard
  Information                                 Observed
  Observed information based on                Hessian

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)
  neuro_ideal1 =~                                     
    b06_04_ (lmb1)    0.216    0.060    3.608    0.000
    b06_09_ (lmb2)    0.298    0.059    5.090    0.000
    b06_14_ (lmb3)   -0.416    0.075   -5.548    0.000
    b06_19_ (lmb4)   -0.335    0.067   -5.009    0.000
    b06_24_ (lmb5)    0.232    0.065    3.600    0.000
    b06_29_ (lmb6)    0.329    0.088    3.733    0.000
    b06_34_ (lmb7)   -0.601    0.074   -8.141    0.000
    b06_39_ (lmb8)   -0.638    0.067   -9.460    0.000
    b06_44_ (lmb9)    0.311    0.065    4.780    0.000
    b06_49_ (lm10)    0.480    0.116    4.130    0.000
    b06_54_ (lm11)   -0.525    0.060   -8.790    0.000
    b06_59_ (lm12)   -0.507    0.077   -6.549    0.000

Intercepts:
                   Estimate  Std.Err  z-value  P(>|z|)
   .bf06_04_1 (i1)    4.563    0.055   83.323    0.000
   .bf06_09_1         4.425    0.055   80.928    0.000
   .bf06_14_1         1.661    0.070   23.676    0.000
   .bf06_19_1         1.954    0.062   31.618    0.000
   .bf06_24_1         4.638    0.059   79.061    0.000
   .bf06_29_1         4.299    0.080   53.640    0.000
   .bf06_34_1         1.787    0.072   24.654    0.000
   .bf06_39_1         1.632    0.068   23.963    0.000
   .bf06_44_1         4.351    0.060   72.304    0.000
   .bf06_49_1         3.580    0.107   33.572    0.000
   .bf06_54_1         1.460    0.060   24.304    0.000
   .bf06_59_1         1.931    0.074   25.987    0.000
    neuro_dl1         0.000                           

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)
   .bf06_04_t1        0.475    0.052    9.089    0.000
   .bf06_09_t1        0.431    0.049    8.834    0.000
   .bf06_14_t1        0.683    0.079    8.681    0.000
   .bf06_19_t1        0.552    0.063    8.788    0.000
   .bf06_24_t1        0.545    0.060    9.078    0.000
   .bf06_29_t1        1.009    0.111    9.051    0.000
   .bf06_34_t1        0.553    0.073    7.595    0.000
   .bf06_39_t1        0.401    0.060    6.690    0.000
   .bf06_44_t1        0.533    0.060    8.867    0.000
   .bf06_49_t1        1.749    0.194    9.006    0.000
   .bf06_54_t1        0.352    0.048    7.402    0.000
   .bf06_59_t1        0.703    0.083    8.441    0.000
    neuro_ideal1      1.000                           
tidy(fit_cfa_neuro_ideal) %>% filter(str_detect(label, "lambda")) %>% mutate(abs_loading = abs(estimate)) %>% arrange(desc(abs_loading)) %>% select(1:5, std.all)
# A tibble: 12 × 6
   term                       op    label    estimate std.error std.all
   <chr>                      <chr> <chr>       <dbl>     <dbl>   <dbl>
 1 neuro_ideal1 =~ bf06_39_t1 =~    lambda8    -0.638    0.0674  -0.710
 2 neuro_ideal1 =~ bf06_34_t1 =~    lambda7    -0.601    0.0738  -0.629
 3 neuro_ideal1 =~ bf06_54_t1 =~    lambda11   -0.525    0.0598  -0.663
 4 neuro_ideal1 =~ bf06_59_t1 =~    lambda12   -0.507    0.0775  -0.518
 5 neuro_ideal1 =~ bf06_49_t1 =~    lambda10    0.480    0.116    0.341
 6 neuro_ideal1 =~ bf06_14_t1 =~    lambda3    -0.416    0.0750  -0.450
 7 neuro_ideal1 =~ bf06_19_t1 =~    lambda4    -0.335    0.0670  -0.411
 8 neuro_ideal1 =~ bf06_29_t1 =~    lambda6     0.329    0.0882   0.311
 9 neuro_ideal1 =~ bf06_44_t1 =~    lambda9     0.311    0.0651   0.392
10 neuro_ideal1 =~ bf06_09_t1 =~    lambda2     0.298    0.0585   0.413
11 neuro_ideal1 =~ bf06_24_t1 =~    lambda5     0.232    0.0645   0.300
12 neuro_ideal1 =~ bf06_04_t1 =~    lambda1     0.216    0.0600   0.300

Reverse-code and form parcels:

df_sbsa3_wide_pers <- df_sbsa3_wide_pers %>% 
  mutate(bf06_29_t1_r = bf06_29_t1,
         bf06_04_t1_r = bf06_04_t1,
         bf06_24_t1_r = bf06_24_t1,
         bf06_44_t1_r = bf06_44_t1,
         bf06_09_t1_r = bf06_09_t1,
         bf06_49_t1_r = bf06_49_t1,
         bf06_29_t2_r = bf06_29_t2,
         bf06_04_t2_r = bf06_04_t2,
         bf06_24_t2_r = bf06_24_t2,
         bf06_44_t2_r = bf06_44_t2,
         bf06_09_t2_r = bf06_09_t2,
         bf06_49_t2_r = bf06_49_t2) %>% 
  mutate(across(c(bf06_29_t1_r, bf06_04_t1_r, bf06_24_t1_r, bf06_44_t1_r, bf06_09_t1_r, bf06_49_t1_r, 
                  bf06_29_t2_r, bf06_04_t2_r, bf06_24_t2_r, bf06_44_t2_r, bf06_09_t2_r, bf06_49_t2_r), 
                ~ recode(.x, `1` = 5L, `2` = 4L, `3` = 3L, `4` = 2L, `5` = 1L, .default = NA_integer_))) %>% 
  mutate(neuro_ideal_par1_t1 = rowMeans(across(c(bf06_39_t1, bf06_14_t1, bf06_44_t1_r, bf06_04_t1_r)), na.rm=T),
         neuro_ideal_par2_t1 = rowMeans(across(c(bf06_34_t1, bf06_49_t1, bf06_29_t1_r, bf06_24_t1_r)), na.rm=T),
         neuro_ideal_par3_t1 = rowMeans(across(c(bf06_54_t1, bf06_59_t1, bf06_19_t1, bf06_09_t1_r)), na.rm=T),
         neuro_ideal_par1_t2 = rowMeans(across(c(bf06_39_t2, bf06_14_t2, bf06_44_t2_r, bf06_04_t2_r)), na.rm=T),
         neuro_ideal_par2_t2 = rowMeans(across(c(bf06_34_t2, bf06_49_t2, bf06_29_t2_r, bf06_24_t2_r)), na.rm=T),
         neuro_ideal_par3_t2 = rowMeans(across(c(bf06_54_t2, bf06_59_t2, bf06_19_t2, bf06_09_t2_r)), na.rm=T))

3.2.9 Openness - current personality

Check CFA item loadings at T1

cfa_openn_curr <- '
# Define the latent factors
openn_curr1 =~ NA*bf05_05_t1 + lambda1*bf05_05_t1 + lambda2*bf05_10_t1 + lambda3*bf05_15_t1 + lambda4*bf05_20_t1 + lambda5*bf05_25_t1 + lambda6*bf05_30_t1 + lambda7*bf05_35_t1 + lambda8*bf05_40_t1 + lambda9*bf05_45_t1 + lambda10*bf05_50_t1 + lambda11*bf05_55_t1 + lambda12*bf05_60_t1

# Intercepts
bf05_05_t1 ~ i1*1
bf05_10_t1 ~ 1
bf05_15_t1 ~ 1
bf05_20_t1 ~ 1
bf05_25_t1 ~ 1
bf05_30_t1 ~ 1
bf05_35_t1 ~ 1
bf05_40_t1 ~ 1
bf05_45_t1 ~ 1
bf05_50_t1 ~ 1
bf05_55_t1 ~ 1
bf05_60_t1 ~ 1

# Unique Variances
bf05_05_t1 ~~ bf05_05_t1
bf05_10_t1 ~~ bf05_10_t1
bf05_15_t1 ~~ bf05_15_t1
bf05_20_t1 ~~ bf05_20_t1
bf05_25_t1 ~~ bf05_25_t1
bf05_30_t1 ~~ bf05_30_t1
bf05_35_t1 ~~ bf05_35_t1
bf05_40_t1 ~~ bf05_40_t1
bf05_45_t1 ~~ bf05_45_t1
bf05_50_t1 ~~ bf05_50_t1
bf05_55_t1 ~~ bf05_55_t1
bf05_60_t1 ~~ bf05_60_t1

# Latent Variable Means
openn_curr1 ~ 0*1

# Latent Variable Variances and Covariance
openn_curr1 ~~ 1*openn_curr1
'
fit_cfa_openn_curr <- cfa(cfa_openn_curr, data = df_sbsa3_wide_pers, mimic = "mplus", missing="ML")
summary(fit_cfa_openn_curr, fit.measures = TRUE)
lavaan 0.6.15 ended normally after 38 iterations

  Estimator                                         ML
  Optimization method                           NLMINB
  Number of model parameters                        36

                                                  Used       Total
  Number of observations                           529         530
  Number of missing patterns                         3            

Model Test User Model:
                                                      
  Test statistic                               485.860
  Degrees of freedom                                54
  P-value (Chi-square)                           0.000

Model Test Baseline Model:

  Test statistic                              1752.038
  Degrees of freedom                                66
  P-value                                        0.000

User Model versus Baseline Model:

  Comparative Fit Index (CFI)                    0.744
  Tucker-Lewis Index (TLI)                       0.687
                                                      
  Robust Comparative Fit Index (CFI)             0.744
  Robust Tucker-Lewis Index (TLI)                0.687

Loglikelihood and Information Criteria:

  Loglikelihood user model (H0)              -9023.572
  Loglikelihood unrestricted model (H1)      -8780.642
                                                      
  Akaike (AIC)                               18119.144
  Bayesian (BIC)                             18272.900
  Sample-size adjusted Bayesian (SABIC)      18158.626

Root Mean Square Error of Approximation:

  RMSEA                                          0.123
  90 Percent confidence interval - lower         0.113
  90 Percent confidence interval - upper         0.133
  P-value H_0: RMSEA <= 0.050                    0.000
  P-value H_0: RMSEA >= 0.080                    1.000
                                                      
  Robust RMSEA                                   0.123
  90 Percent confidence interval - lower         0.113
  90 Percent confidence interval - upper         0.133
  P-value H_0: Robust RMSEA <= 0.050             0.000
  P-value H_0: Robust RMSEA >= 0.080             1.000

Standardized Root Mean Square Residual:

  SRMR                                           0.071

Parameter Estimates:

  Standard errors                             Standard
  Information                                 Observed
  Observed information based on                Hessian

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)
  openn_curr1 =~                                      
    b05_05_ (lmb1)    0.264    0.062    4.250    0.000
    b05_10_ (lmb2)   -0.472    0.039  -12.041    0.000
    b05_15_ (lmb3)   -0.496    0.045  -10.959    0.000
    b05_20_ (lmb4)   -0.718    0.052  -13.924    0.000
    b05_25_ (lmb5)    0.508    0.052    9.740    0.000
    b05_30_ (lmb6)    0.763    0.054   14.191    0.000
    b05_35_ (lmb7)   -0.737    0.044  -16.590    0.000
    b05_40_ (lmb8)   -0.396    0.044   -9.033    0.000
    b05_45_ (lmb9)    0.600    0.049   12.235    0.000
    b05_50_ (lm10)    0.733    0.057   12.840    0.000
    b05_55_ (lm11)    0.656    0.051   12.799    0.000
    b05_60_ (lm12)   -0.612    0.045  -13.592    0.000

Intercepts:
                   Estimate  Std.Err  z-value  P(>|z|)
   .bf05_05_1 (i1)    2.947    0.057   51.273    0.000
   .bf05_10_1         4.257    0.039  110.013    0.000
   .bf05_15_1         3.756    0.043   86.840    0.000
   .bf05_20_1         3.917    0.050   77.742    0.000
   .bf05_25_1         2.146    0.050   42.723    0.000
   .bf05_30_1         2.514    0.054   46.215    0.000
   .bf05_35_1         3.964    0.045   87.351    0.000
   .bf05_40_1         4.030    0.042   96.362    0.000
   .bf05_45_1         1.991    0.048   41.146    0.000
   .bf05_50_1         2.378    0.057   41.878    0.000
   .bf05_55_1         2.461    0.051   48.058    0.000
   .bf05_60_1         3.603    0.044   81.225    0.000
    opnn_crr1         0.000                           

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)
   .bf05_05_t1        1.678    0.104   16.129    0.000
   .bf05_10_t1        0.570    0.038   14.974    0.000
   .bf05_15_t1        0.744    0.050   14.897    0.000
   .bf05_20_t1        0.828    0.062   13.363    0.000
   .bf05_25_t1        1.076    0.070   15.472    0.000
   .bf05_30_t1        0.982    0.069   14.206    0.000
   .bf05_35_t1        0.547    0.045   12.077    0.000
   .bf05_40_t1        0.769    0.049   15.585    0.000
   .bf05_45_t1        0.878    0.059   14.851    0.000
   .bf05_50_t1        1.168    0.080   14.680    0.000
   .bf05_55_t1        0.955    0.064   14.810    0.000
   .bf05_60_t1        0.667    0.048   13.910    0.000
    openn_curr1       1.000                           
tidy(fit_cfa_openn_curr) %>% filter(str_detect(label, "lambda")) %>% mutate(abs_loading = abs(estimate)) %>% arrange(desc(abs_loading)) %>% select(1:5, std.all)
# A tibble: 12 × 6
   term                      op    label    estimate std.error std.all
   <chr>                     <chr> <chr>       <dbl>     <dbl>   <dbl>
 1 openn_curr1 =~ bf05_30_t1 =~    lambda6     0.763    0.0538   0.610
 2 openn_curr1 =~ bf05_35_t1 =~    lambda7    -0.737    0.0444  -0.706
 3 openn_curr1 =~ bf05_50_t1 =~    lambda10    0.733    0.0571   0.561
 4 openn_curr1 =~ bf05_20_t1 =~    lambda4    -0.718    0.0515  -0.619
 5 openn_curr1 =~ bf05_55_t1 =~    lambda11    0.656    0.0512   0.557
 6 openn_curr1 =~ bf05_60_t1 =~    lambda12   -0.612    0.0450  -0.600
 7 openn_curr1 =~ bf05_45_t1 =~    lambda9     0.600    0.0490   0.539
 8 openn_curr1 =~ bf05_25_t1 =~    lambda5     0.508    0.0522   0.440
 9 openn_curr1 =~ bf05_15_t1 =~    lambda3    -0.496    0.0452  -0.498
10 openn_curr1 =~ bf05_10_t1 =~    lambda2    -0.472    0.0392  -0.530
11 openn_curr1 =~ bf05_40_t1 =~    lambda8    -0.396    0.0438  -0.411
12 openn_curr1 =~ bf05_05_t1 =~    lambda1     0.264    0.0622   0.200

Reverse-code and form parcels:

df_sbsa3_wide_pers <- df_sbsa3_wide_pers %>% 
  mutate(bf05_50_t1_r = bf05_50_t1,
         bf05_55_t1_r = bf05_55_t1,
         bf05_30_t1_r = bf05_30_t1,
         bf05_25_t1_r = bf05_25_t1,
         bf05_45_t1_r = bf05_45_t1,
         bf05_05_t1_r = bf05_05_t1,
         bf05_50_t2_r = bf05_50_t2,
         bf05_55_t2_r = bf05_55_t2,
         bf05_30_t2_r = bf05_30_t2,
         bf05_25_t2_r = bf05_25_t2,
         bf05_45_t2_r = bf05_45_t2,
         bf05_05_t2_r = bf05_05_t2) %>% 
  mutate(across(c(bf05_50_t1_r, bf05_55_t1_r, bf05_30_t1_r, bf05_25_t1_r, bf05_45_t1_r, bf05_05_t1_r,
                  bf05_50_t2_r, bf05_55_t2_r, bf05_30_t2_r, bf05_25_t2_r, bf05_45_t2_r, bf05_05_t2_r), 
                ~ recode(.x, `1` = 5L, `2` = 4L, `3` = 3L, `4` = 2L, `5` = 1L, .default = NA_integer_))) %>% 
  mutate(openn_curr_par1_t1 = rowMeans(across(c(bf05_30_t1_r, bf05_60_t1, bf05_15_t1, bf05_05_t1_r)), na.rm=T),
         openn_curr_par2_t1 = rowMeans(across(c(bf05_35_t1, bf05_55_t1_r, bf05_25_t1_r, bf05_40_t1)), na.rm=T),
         openn_curr_par3_t1 = rowMeans(across(c(bf05_50_t1_r, bf05_20_t1, bf05_45_t1_r, bf05_10_t1)), na.rm=T),
         openn_curr_par1_t2 = rowMeans(across(c(bf05_30_t2_r, bf05_60_t2, bf05_15_t2, bf05_05_t2_r)), na.rm=T),
         openn_curr_par2_t2 = rowMeans(across(c(bf05_35_t2, bf05_55_t2_r, bf05_25_t2_r, bf05_40_t2)), na.rm=T),
         openn_curr_par3_t2 = rowMeans(across(c(bf05_50_t2_r, bf05_20_t2, bf05_45_t2_r, bf05_10_t2)), na.rm=T))

3.2.10 Openness - ideal personality

Check CFA item loadings at T1

cfa_openn_ideal <- '
# Define the latent factors
openn_ideal1 =~ NA*bf06_05_t1 + lambda1*bf06_05_t1 + lambda2*bf06_10_t1 + lambda3*bf06_15_t1 + lambda4*bf06_20_t1 + lambda5*bf06_25_t1 + lambda6*bf06_30_t1 + lambda7*bf06_35_t1 + lambda8*bf06_40_t1 + lambda9*bf06_45_t1 + lambda10*bf06_50_t1 + lambda11*bf06_55_t1 + lambda12*bf06_60_t1

# Intercepts
bf06_05_t1 ~ i1*1
bf06_10_t1 ~ 1
bf06_15_t1 ~ 1
bf06_20_t1 ~ 1
bf06_25_t1 ~ 1
bf06_30_t1 ~ 1
bf06_35_t1 ~ 1
bf06_40_t1 ~ 1
bf06_45_t1 ~ 1
bf06_50_t1 ~ 1
bf06_55_t1 ~ 1
bf06_60_t1 ~ 1

# Unique Variances
bf06_05_t1 ~~ bf06_05_t1
bf06_10_t1 ~~ bf06_10_t1
bf06_15_t1 ~~ bf06_15_t1
bf06_20_t1 ~~ bf06_20_t1
bf06_25_t1 ~~ bf06_25_t1
bf06_30_t1 ~~ bf06_30_t1
bf06_35_t1 ~~ bf06_35_t1
bf06_40_t1 ~~ bf06_40_t1
bf06_45_t1 ~~ bf06_45_t1
bf06_50_t1 ~~ bf06_50_t1
bf06_55_t1 ~~ bf06_55_t1
bf06_60_t1 ~~ bf06_60_t1

# Latent Variable Means
openn_ideal1 ~ 0*1

# Latent Variable Variances and Covariance
openn_ideal1 ~~ 1*openn_ideal1
'
fit_cfa_openn_ideal <- cfa(cfa_openn_ideal, data = df_sbsa3_wide_pers %>% filter(group=="Group 1"), 
                           mimic = "mplus", missing="ML")
summary(fit_cfa_openn_ideal, fit.measures = TRUE)
lavaan 0.6.15 ended normally after 43 iterations

  Estimator                                         ML
  Optimization method                           NLMINB
  Number of model parameters                        36

                                                  Used       Total
  Number of observations                           174         175
  Number of missing patterns                         2            

Model Test User Model:
                                                      
  Test statistic                                91.003
  Degrees of freedom                                54
  P-value (Chi-square)                           0.001

Model Test Baseline Model:

  Test statistic                               409.313
  Degrees of freedom                                66
  P-value                                        0.000

User Model versus Baseline Model:

  Comparative Fit Index (CFI)                    0.892
  Tucker-Lewis Index (TLI)                       0.868
                                                      
  Robust Comparative Fit Index (CFI)             0.892
  Robust Tucker-Lewis Index (TLI)                0.868

Loglikelihood and Information Criteria:

  Loglikelihood user model (H0)              -2848.673
  Loglikelihood unrestricted model (H1)      -2803.172
                                                      
  Akaike (AIC)                                5769.347
  Bayesian (BIC)                              5883.073
  Sample-size adjusted Bayesian (SABIC)       5769.074

Root Mean Square Error of Approximation:

  RMSEA                                          0.063
  90 Percent confidence interval - lower         0.039
  90 Percent confidence interval - upper         0.085
  P-value H_0: RMSEA <= 0.050                    0.169
  P-value H_0: RMSEA >= 0.080                    0.102
                                                      
  Robust RMSEA                                   0.063
  90 Percent confidence interval - lower         0.039
  90 Percent confidence interval - upper         0.085
  P-value H_0: Robust RMSEA <= 0.050             0.168
  P-value H_0: Robust RMSEA >= 0.080             0.103

Standardized Root Mean Square Residual:

  SRMR                                           0.057

Parameter Estimates:

  Standard errors                             Standard
  Information                                 Observed
  Observed information based on                Hessian

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)
  openn_ideal1 =~                                     
    b06_05_ (lmb1)    0.140    0.126    1.112    0.266
    b06_10_ (lmb2)   -0.305    0.067   -4.559    0.000
    b06_15_ (lmb3)   -0.332    0.055   -6.002    0.000
    b06_20_ (lmb4)   -0.544    0.073   -7.405    0.000
    b06_25_ (lmb5)    0.453    0.078    5.782    0.000
    b06_30_ (lmb6)    0.662    0.109    6.048    0.000
    b06_35_ (lmb7)   -0.611    0.069   -8.887    0.000
    b06_40_ (lmb8)   -0.457    0.091   -5.033    0.000
    b06_45_ (lmb9)    0.599    0.088    6.787    0.000
    b06_50_ (lm10)    0.447    0.094    4.756    0.000
    b06_55_ (lm11)    0.599    0.093    6.465    0.000
    b06_60_ (lm12)   -0.523    0.059   -8.796    0.000

Intercepts:
                   Estimate  Std.Err  z-value  P(>|z|)
   .bf06_05_1 (i1)    3.144    0.113   27.936    0.000
   .bf06_10_1         4.454    0.062   71.608    0.000
   .bf06_15_1         4.603    0.053   87.585    0.000
   .bf06_20_1         4.305    0.072   60.189    0.000
   .bf06_25_1         1.672    0.075   22.429    0.000
   .bf06_30_1         2.029    0.104   19.437    0.000
   .bf06_35_1         4.310    0.069   62.665    0.000
   .bf06_40_1         3.931    0.085   46.202    0.000
   .bf06_45_1         1.707    0.085   20.113    0.000
   .bf06_50_1         2.011    0.088   22.980    0.000
   .bf06_55_1         2.103    0.089   23.618    0.000
   .bf06_60_1         4.489    0.060   75.334    0.000
    openn_dl1         0.000                           

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)
   .bf06_05_t1        2.184    0.235    9.307    0.000
   .bf06_10_t1        0.580    0.065    8.963    0.000
   .bf06_15_t1        0.370    0.043    8.617    0.000
   .bf06_20_t1        0.594    0.073    8.086    0.000
   .bf06_25_t1        0.762    0.087    8.725    0.000
   .bf06_30_t1        1.449    0.169    8.584    0.000
   .bf06_35_t1        0.450    0.063    7.201    0.000
   .bf06_40_t1        1.051    0.118    8.870    0.000
   .bf06_45_t1        0.895    0.107    8.340    0.000
   .bf06_50_t1        1.133    0.127    8.913    0.000
   .bf06_55_t1        1.022    0.120    8.514    0.000
   .bf06_60_t1        0.344    0.047    7.316    0.000
    openn_ideal1      1.000                           
tidy(fit_cfa_openn_ideal) %>% filter(str_detect(label, "lambda")) %>% mutate(abs_loading = abs(estimate)) %>% arrange(desc(abs_loading)) %>% select(1:5, std.all)
# A tibble: 12 × 6
   term                       op    label    estimate std.error std.all
   <chr>                      <chr> <chr>       <dbl>     <dbl>   <dbl>
 1 openn_ideal1 =~ bf06_30_t1 =~    lambda6     0.662    0.109   0.482 
 2 openn_ideal1 =~ bf06_35_t1 =~    lambda7    -0.611    0.0687 -0.673 
 3 openn_ideal1 =~ bf06_45_t1 =~    lambda9     0.599    0.0882  0.535 
 4 openn_ideal1 =~ bf06_55_t1 =~    lambda11    0.599    0.0926  0.510 
 5 openn_ideal1 =~ bf06_20_t1 =~    lambda4    -0.544    0.0735 -0.577 
 6 openn_ideal1 =~ bf06_60_t1 =~    lambda12   -0.523    0.0595 -0.666 
 7 openn_ideal1 =~ bf06_40_t1 =~    lambda8    -0.457    0.0908 -0.407 
 8 openn_ideal1 =~ bf06_25_t1 =~    lambda5     0.453    0.0783  0.460 
 9 openn_ideal1 =~ bf06_50_t1 =~    lambda10    0.447    0.0940  0.387 
10 openn_ideal1 =~ bf06_15_t1 =~    lambda3    -0.332    0.0553 -0.479 
11 openn_ideal1 =~ bf06_10_t1 =~    lambda2    -0.305    0.0668 -0.371 
12 openn_ideal1 =~ bf06_05_t1 =~    lambda1     0.140    0.126   0.0940

Reverse-code and form parcels:

df_sbsa3_wide_pers <- df_sbsa3_wide_pers %>% 
  mutate(bf06_50_t1_r = bf06_50_t1,
         bf06_55_t1_r = bf06_55_t1,
         bf06_30_t1_r = bf06_30_t1,
         bf06_25_t1_r = bf06_25_t1,
         bf06_45_t1_r = bf06_45_t1,
         bf06_05_t1_r = bf06_05_t1,
         bf06_50_t2_r = bf06_50_t2,
         bf06_55_t2_r = bf06_55_t2,
         bf06_30_t2_r = bf06_30_t2,
         bf06_25_t2_r = bf06_25_t2,
         bf06_45_t2_r = bf06_45_t2,
         bf06_05_t2_r = bf06_05_t2) %>% 
  mutate(across(c(bf06_50_t1_r, bf06_55_t1_r, bf06_30_t1_r, bf06_25_t1_r, bf06_45_t1_r, bf06_05_t1_r,
                  bf06_50_t2_r, bf06_55_t2_r, bf06_30_t2_r, bf06_25_t2_r, bf06_45_t2_r, bf06_05_t2_r), 
                ~ recode(.x, `1` = 5L, `2` = 4L, `3` = 3L, `4` = 2L, `5` = 1L, .default = NA_integer_))) %>% 
  mutate(openn_ideal_par1_t1 = rowMeans(across(c(bf06_30_t1_r, bf06_60_t1, bf06_50_t1_r, bf06_05_t1_r)), na.rm=T),
         openn_ideal_par2_t1 = rowMeans(across(c(bf06_35_t1, bf06_20_t1, bf06_25_t1_r, bf06_10_t1)), na.rm=T),
         openn_ideal_par3_t1 = rowMeans(across(c(bf06_45_t1_r, bf06_55_t1_r, bf06_40_t1, bf06_15_t1)), na.rm=T),
         openn_ideal_par1_t2 = rowMeans(across(c(bf06_30_t2_r, bf06_60_t2, bf06_50_t2_r, bf06_05_t2_r)), na.rm=T),
         openn_ideal_par2_t2 = rowMeans(across(c(bf06_35_t2, bf06_20_t2, bf06_25_t2_r, bf06_10_t2)), na.rm=T),
         openn_ideal_par3_t2 = rowMeans(across(c(bf06_45_t2_r, bf06_55_t2_r, bf06_40_t2, bf06_15_t2)), na.rm=T))

# replace NaN with regular NA
df_sbsa3_wide_pers <- df_sbsa3_wide_pers %>% 
    mutate_all(~ifelse(is.nan(.), NA, .))

3.3 Recode all reverse-keyed items

This is only relevant for the facet-level models. All trait-level models rely on the parcels created above. However, the facet-level models pick the first item as the anchor indicator which is sometimes reverse-keyed for the latent factor. For analyses below I want to use just positively keyed variables (for neuroticism facets higher values should indicate higher neuroticism).

# save all relevant variable names
names_rev_scored <- df_sbsa3_wide_pers %>% select(ends_with("_r")) %>% colnames()
names_rev_scored_orig <- gsub("_r", "", names_rev_scored) # variable names without the "_r" -> these are the original variables

# drop original variables
df_sbsa3_wide_pers <- df_sbsa3_wide_pers %>% select(-all_of(names_rev_scored_orig))
# recreate original variable names -> these are the recoded variables now!
df_sbsa3_wide_pers <- df_sbsa3_wide_pers %>% rename_with(~ gsub("_r", "", .x))

4 Measurement invariance

4.1 Well-being outcomes

Testing for measurement invariance of the latent traits across time.

4.1.1 Life satisfaction

Satisfaction with life scale (only the first four items - last one taps into a somewhat different construct)

Fit model:

Show the code
# Code snippets adapted from https://quantdev.ssri.psu.edu/sites/qdev/files/LongitudinalMeasurementInvariance_2017_1108.html

# Configural invariance model
configural_swls <- '
# Define the latent factors
swls1 =~ NA*sw06_01_t1 + lambda1*sw06_01_t1 + sw06_02_t1 + sw06_03_t1 + sw06_04_t1
swls2 =~ NA*sw06_01_t2 + lambda1*sw06_01_t2 + sw06_02_t2 + sw06_03_t2 + sw06_04_t2

# Intercepts
sw06_01_t1 ~ i1*1
sw06_02_t1 ~ 1
sw06_03_t1 ~ 1
sw06_04_t1 ~ 1

sw06_01_t2 ~ i1*1
sw06_02_t2 ~ 1
sw06_03_t2 ~ 1
sw06_04_t2 ~ 1

# Unique Variances
sw06_01_t1 ~~ sw06_01_t1
sw06_02_t1 ~~ sw06_02_t1
sw06_03_t1 ~~ sw06_03_t1
sw06_04_t1 ~~ sw06_04_t1

sw06_01_t2 ~~ sw06_01_t2
sw06_02_t2 ~~ sw06_02_t2
sw06_03_t2 ~~ sw06_03_t2
sw06_04_t2 ~~ sw06_04_t2

# Latent Variable Means
swls1 ~ 0*1
swls2 ~ 1

# Latent Variable Variances and Covariance
swls1 ~~ 1*swls1
swls2 ~~ swls2
swls1 ~~ swls2
'
fit_configural_swls <- cfa(configural_swls, data = df_sbsa3_wide_wb, mimic = "mplus", missing="ML")
summary(fit_configural_swls, fit.measures = TRUE)

# Weak invariance model
weak_swls <- '
# Define the latent factors
swls1 =~ NA*sw06_01_t1 + lambda1*sw06_01_t1 + lambda2*sw06_02_t1 + lambda3*sw06_03_t1 + lambda4*sw06_04_t1
swls2 =~ NA*sw06_01_t2 + lambda1*sw06_01_t2 + lambda2*sw06_02_t2 + lambda3*sw06_03_t2 + lambda4*sw06_04_t2

# Intercepts
sw06_01_t1 ~ i1*1
sw06_02_t1 ~ 1
sw06_03_t1 ~ 1
sw06_04_t1 ~ 1

sw06_01_t2 ~ i1*1
sw06_02_t2 ~ 1
sw06_03_t2 ~ 1
sw06_04_t2 ~ 1

# Unique Variances
sw06_01_t1 ~~ sw06_01_t1
sw06_02_t1 ~~ sw06_02_t1
sw06_03_t1 ~~ sw06_03_t1
sw06_04_t1 ~~ sw06_04_t1

sw06_01_t2 ~~ sw06_01_t2
sw06_02_t2 ~~ sw06_02_t2
sw06_03_t2 ~~ sw06_03_t2
sw06_04_t2 ~~ sw06_04_t2

# Latent Variable Means
swls1 ~ 0*1
swls2 ~ 1

# Latent Variable Variances and Covariance
swls1 ~~ 1*swls1
swls2 ~~ swls2
swls1 ~~ swls2
'
fit_weak_swls <- cfa(weak_swls, data = df_sbsa3_wide_wb, mimic = "mplus", missing="ML")
summary(fit_weak_swls, fit.measures = TRUE)

# Strong invariance model (additional constraints on manifest intercepts)
strong_swls <- '
# Define the latent factors
swls1 =~ NA*sw06_01_t1 + lambda1*sw06_01_t1 + lambda2*sw06_02_t1 + lambda3*sw06_03_t1 + lambda4*sw06_04_t1
swls2 =~ NA*sw06_01_t2 + lambda1*sw06_01_t2 + lambda2*sw06_02_t2 + lambda3*sw06_03_t2 + lambda4*sw06_04_t2

# Intercepts
sw06_01_t1 ~ i1*1
sw06_02_t1 ~ i2*1
sw06_03_t1 ~ i3*1
sw06_04_t1 ~ i4*1

sw06_01_t2 ~ i1*1
sw06_02_t2 ~ i2*1
sw06_03_t2 ~ i3*1
sw06_04_t2 ~ i4*1

# Unique Variances
sw06_01_t1 ~~ sw06_01_t1
sw06_02_t1 ~~ sw06_02_t1
sw06_03_t1 ~~ sw06_03_t1
sw06_04_t1 ~~ sw06_04_t1

sw06_01_t2 ~~ sw06_01_t2
sw06_02_t2 ~~ sw06_02_t2
sw06_03_t2 ~~ sw06_03_t2
sw06_04_t2 ~~ sw06_04_t2

# Latent Variable Means
swls1 ~ 0*1
swls2 ~ 1

# Latent Variable Variances and Covariance
swls1 ~~ 1*swls1
swls2 ~~ swls2
swls1 ~~ swls2
'
fit_strong_swls <- cfa(strong_swls, data = df_sbsa3_wide_wb, mimic = "mplus", missing="ML")
summary(fit_strong_swls, fit.measures = TRUE)

# Strict invariance model (additional constraints on manifest variances)
strict_swls <- '
# Define the latent factors
swls1 =~ NA*sw06_01_t1 + lambda1*sw06_01_t1 + lambda2*sw06_02_t1 + lambda3*sw06_03_t1 + lambda4*sw06_04_t1
swls2 =~ NA*sw06_01_t2 + lambda1*sw06_01_t2 + lambda2*sw06_02_t2 + lambda3*sw06_03_t2 + lambda4*sw06_04_t2

# Intercepts
sw06_01_t1 ~ i1*1
sw06_02_t1 ~ i2*1
sw06_03_t1 ~ i3*1
sw06_04_t1 ~ i4*1

sw06_01_t2 ~ i1*1
sw06_02_t2 ~ i2*1
sw06_03_t2 ~ i3*1
sw06_04_t2 ~ i4*1

# Unique Variances
sw06_01_t1 ~~ u1*sw06_01_t1
sw06_02_t1 ~~ u2*sw06_02_t1
sw06_03_t1 ~~ u3*sw06_03_t1
sw06_04_t1 ~~ u4*sw06_04_t1

sw06_01_t2 ~~ u1*sw06_01_t2
sw06_02_t2 ~~ u2*sw06_02_t2
sw06_03_t2 ~~ u3*sw06_03_t2
sw06_04_t2 ~~ u4*sw06_04_t2

# Latent Variable Means
swls1 ~ 0*1
swls2 ~ 1

# Latent Variable Variances and Covariance
swls1 ~~ 1*swls1
swls2 ~~ swls2
swls1 ~~ swls2
'
fit_strict_swls <- cfa(strict_swls, data = df_sbsa3_wide_wb, mimic = "mplus", missing="ML")
summary(fit_strict_swls, fit.measures = TRUE)

Results summary:

# compare model fit
bind_rows(broom::glance(fit_configural_swls) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_weak_swls) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_strong_swls) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_strict_swls) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr)) %>% 
  mutate(model = c("configural", "weak", "strong", "strict")) %>% 
  select(model, everything())
# A tibble: 4 × 10
  model       nobs  npar chisq    AIC    BIC   cfi   tli rmsea   srmr
  <chr>      <int> <dbl> <dbl>  <dbl>  <dbl> <dbl> <dbl> <dbl>  <dbl>
1 configural   530    25  241. 10427. 10534. 0.917 0.877 0.148 0.0442
2 weak         530    22  244. 10425. 10519. 0.917 0.894 0.138 0.0491
3 strong       530    19  244. 10419. 10500. 0.918 0.908 0.129 0.0491
4 strict       530    15  250. 10417. 10481. 0.917 0.920 0.120 0.0485
# chi-square difference test for nested models 
anova(fit_configural_swls, fit_weak_swls)

Chi-Squared Difference Test

                    Df   AIC   BIC  Chisq Chisq diff    RMSEA Df diff
fit_configural_swls 19 10427 10534 240.57                            
fit_weak_swls       22 10424 10518 243.84     3.2664 0.012944       3
                    Pr(>Chisq)
fit_configural_swls           
fit_weak_swls           0.3523
anova(fit_weak_swls, fit_strong_swls)

Chi-Squared Difference Test

                Df   AIC   BIC  Chisq Chisq diff RMSEA Df diff Pr(>Chisq)
fit_weak_swls   22 10424 10518 243.84                                    
fit_strong_swls 25 10419 10500 244.08    0.24147     0       3     0.9706
anova(fit_strong_swls, fit_strict_swls)

Chi-Squared Difference Test

                Df   AIC   BIC  Chisq Chisq diff    RMSEA Df diff Pr(>Chisq)
fit_strong_swls 25 10419 10500 244.08                                       
fit_strict_swls 29 10416 10481 249.85      5.771 0.028903       4     0.2169

Model fit relatively similar across model specifications. Chi^2 tests indicate that strict measurement invariance is given.

4.1.2 Meaning in life

For the 5-item subscale presence of meaning in life.

Fit model:

Show the code
# Code snippets adapted from https://quantdev.ssri.psu.edu/sites/qdev/files/LongitudinalMeasurementInvariance_2017_1108.html

# Configural invariance model
configural_meaning <- '
# Define the latent factors
meaning1 =~ NA*ml01_01_t1 + lambda1*ml01_01_t1 + ml01_04_t1 + ml01_05_t1 + ml01_06_t1 + ml01_09_t1
meaning2 =~ NA*ml01_01_t2 + lambda1*ml01_01_t2 + ml01_04_t2 + ml01_05_t2 + ml01_06_t2 + ml01_09_t2

# Intercepts
ml01_01_t1 ~ i1*1
ml01_04_t1 ~ 1
ml01_05_t1 ~ 1
ml01_06_t1 ~ 1
ml01_09_t1 ~ 1

ml01_01_t2 ~ i1*1
ml01_04_t2 ~ 1
ml01_05_t2 ~ 1
ml01_06_t2 ~ 1
ml01_09_t2 ~ 1

# Unique Variances
ml01_01_t1 ~~ ml01_01_t1
ml01_04_t1 ~~ ml01_04_t1
ml01_05_t1 ~~ ml01_05_t1
ml01_06_t1 ~~ ml01_06_t1
ml01_09_t1 ~~ ml01_09_t1

ml01_01_t2 ~~ ml01_01_t2
ml01_04_t2 ~~ ml01_04_t2
ml01_05_t2 ~~ ml01_05_t2
ml01_06_t2 ~~ ml01_06_t2
ml01_09_t2 ~~ ml01_09_t2

# Latent Variable Means
meaning1 ~ 0*1
meaning2 ~ 1

# Latent Variable Variances and Covariance
meaning1 ~~ 1*meaning1
meaning2 ~~ meaning2
meaning1 ~~ meaning2
'
fit_configural_meaning <- cfa(configural_meaning, data = df_sbsa3_wide_wb, mimic = "mplus", missing="ML")
summary(fit_configural_meaning, fit.measures = TRUE)

# Weak invariance model
weak_meaning <- '
# Define the latent factors
meaning1 =~ NA*ml01_01_t1 + lambda1*ml01_01_t1 + lambda4*ml01_04_t1 + lambda5*ml01_05_t1 + lambda6*ml01_06_t1 + lambda9*ml01_09_t1
meaning2 =~ NA*ml01_01_t2 + lambda1*ml01_01_t2 + lambda4*ml01_04_t2 + lambda5*ml01_05_t2 + lambda6*ml01_06_t2 + lambda9*ml01_09_t2

# Intercepts
ml01_01_t1 ~ i1*1
ml01_04_t1 ~ 1
ml01_05_t1 ~ 1
ml01_06_t1 ~ 1
ml01_09_t1 ~ 1

ml01_01_t2 ~ i1*1
ml01_04_t2 ~ 1
ml01_05_t2 ~ 1
ml01_06_t2 ~ 1
ml01_09_t2 ~ 1

# Unique Variances
ml01_01_t1 ~~ ml01_01_t1
ml01_04_t1 ~~ ml01_04_t1
ml01_05_t1 ~~ ml01_05_t1
ml01_06_t1 ~~ ml01_06_t1
ml01_09_t1 ~~ ml01_09_t1

ml01_01_t2 ~~ ml01_01_t2
ml01_04_t2 ~~ ml01_04_t2
ml01_05_t2 ~~ ml01_05_t2
ml01_06_t2 ~~ ml01_06_t2
ml01_09_t2 ~~ ml01_09_t2

# Latent Variable Means
meaning1 ~ 0*1
meaning2 ~ 1

# Latent Variable Variances and Covariance
meaning1 ~~ 1*meaning1
meaning2 ~~ meaning2
meaning1 ~~ meaning2
'
fit_weak_meaning <- cfa(weak_meaning, data = df_sbsa3_wide_wb, mimic = "mplus", missing="ML")
summary(fit_weak_meaning, fit.measures = TRUE)

# Strong invariance model (additional constraints on manifest intercepts)
strong_meaning <- '
# Define the latent factors
meaning1 =~ NA*ml01_01_t1 + lambda1*ml01_01_t1 + lambda4*ml01_04_t1 + lambda5*ml01_05_t1 + lambda6*ml01_06_t1 + lambda9*ml01_09_t1
meaning2 =~ NA*ml01_01_t2 + lambda1*ml01_01_t2 + lambda4*ml01_04_t2 + lambda5*ml01_05_t2 + lambda6*ml01_06_t2 + lambda9*ml01_09_t2

# Intercepts
ml01_01_t1 ~ i1*1
ml01_04_t1 ~ i2*1
ml01_05_t1 ~ i3*1
ml01_06_t1 ~ i4*1
ml01_09_t1 ~ i5*1

ml01_01_t2 ~ i1*1
ml01_04_t2 ~ i2*1
ml01_05_t2 ~ i3*1
ml01_06_t2 ~ i4*1
ml01_09_t2 ~ i5*1

# Unique Variances
ml01_01_t1 ~~ ml01_01_t1
ml01_04_t1 ~~ ml01_04_t1
ml01_05_t1 ~~ ml01_05_t1
ml01_06_t1 ~~ ml01_06_t1
ml01_09_t1 ~~ ml01_09_t1

ml01_01_t2 ~~ ml01_01_t2
ml01_04_t2 ~~ ml01_04_t2
ml01_05_t2 ~~ ml01_05_t2
ml01_06_t2 ~~ ml01_06_t2
ml01_09_t2 ~~ ml01_09_t2

# Latent Variable Means
meaning1 ~ 0*1
meaning2 ~ 1

# Latent Variable Variances and Covariance
meaning1 ~~ 1*meaning1
meaning2 ~~ meaning2
meaning1 ~~ meaning2
'
fit_strong_meaning <- cfa(strong_meaning, data = df_sbsa3_wide_wb, mimic = "mplus", missing="ML")
summary(fit_strong_meaning, fit.measures = TRUE)

# Strict invariance model (additional constraints on manifest variances)
strict_meaning <- '
# Define the latent factors
meaning1 =~ NA*ml01_01_t1 + lambda1*ml01_01_t1 + lambda4*ml01_04_t1 + lambda5*ml01_05_t1 + lambda6*ml01_06_t1 + lambda9*ml01_09_t1
meaning2 =~ NA*ml01_01_t2 + lambda1*ml01_01_t2 + lambda4*ml01_04_t2 + lambda5*ml01_05_t2 + lambda6*ml01_06_t2 + lambda9*ml01_09_t2

# Intercepts
ml01_01_t1 ~ i1*1
ml01_04_t1 ~ i2*1
ml01_05_t1 ~ i3*1
ml01_06_t1 ~ i4*1
ml01_09_t1 ~ i5*1

ml01_01_t2 ~ i1*1
ml01_04_t2 ~ i2*1
ml01_05_t2 ~ i3*1
ml01_06_t2 ~ i4*1
ml01_09_t2 ~ i5*1

# Unique Variances
ml01_01_t1 ~~ u1*ml01_01_t1
ml01_04_t1 ~~ u2*ml01_04_t1
ml01_05_t1 ~~ u3*ml01_05_t1
ml01_06_t1 ~~ u4*ml01_06_t1
ml01_09_t1 ~~ u5*ml01_09_t1

ml01_01_t2 ~~ u1*ml01_01_t2
ml01_04_t2 ~~ u2*ml01_04_t2
ml01_05_t2 ~~ u3*ml01_05_t2
ml01_06_t2 ~~ u4*ml01_06_t2
ml01_09_t2 ~~ u5*ml01_09_t2

# Latent Variable Means
meaning1 ~ 0*1
meaning2 ~ 1

# Latent Variable Variances and Covariance
meaning1 ~~ 1*meaning1
meaning2 ~~ meaning2
meaning1 ~~ meaning2
'
fit_strict_meaning <- cfa(strict_meaning, data = df_sbsa3_wide_wb, mimic = "mplus", missing="ML")
summary(fit_strict_meaning, fit.measures = TRUE)

Results summary:

# compare model fit
bind_rows(broom::glance(fit_configural_meaning) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_weak_meaning) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_strong_meaning) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_strict_meaning) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr)) %>% 
  mutate(model = c("configural", "weak", "strong", "strict")) %>% 
  select(model, everything())
# A tibble: 4 × 10
  model       nobs  npar chisq    AIC    BIC   cfi   tli  rmsea   srmr
  <chr>      <int> <dbl> <dbl>  <dbl>  <dbl> <dbl> <dbl>  <dbl>  <dbl>
1 configural   530    31  187. 15516. 15649. 0.963 0.952 0.0920 0.0290
2 weak         530    27  188. 15510. 15625. 0.964 0.957 0.0864 0.0305
3 strong       530    23  192. 15506. 15604. 0.964 0.961 0.0822 0.0324
4 strict       530    18  200. 15504. 15580. 0.963 0.965 0.0784 0.0341
# chi-square difference test for nested models 
anova(fit_configural_meaning, fit_weak_meaning)

Chi-Squared Difference Test

                       Df   AIC   BIC  Chisq Chisq diff RMSEA Df diff
fit_configural_meaning 34 15516 15649 186.51                         
fit_weak_meaning       38 15510 15625 188.43     1.9237     0       4
                       Pr(>Chisq)
fit_configural_meaning           
fit_weak_meaning           0.7498
anova(fit_weak_meaning, fit_strong_meaning)

Chi-Squared Difference Test

                   Df   AIC   BIC  Chisq Chisq diff RMSEA Df diff Pr(>Chisq)
fit_weak_meaning   38 15510 15625 188.43                                    
fit_strong_meaning 42 15506 15604 192.31     3.8822     0       4     0.4222
anova(fit_strong_meaning, fit_strict_meaning)

Chi-Squared Difference Test

                   Df   AIC   BIC  Chisq Chisq diff    RMSEA Df diff Pr(>Chisq)
fit_strong_meaning 42 15506 15604 192.31                                       
fit_strict_meaning 47 15504 15580 199.98     7.6659 0.031718       5     0.1756

Good model fit across all model specifications. Chi^2 tests indicate that strict measurement invariance is given.

4.1.2.1 Search for Meaning

For the 5-item subscale search for meaning in life.

Fit model:

Show the code
# Code snippets adapted from https://quantdev.ssri.psu.edu/sites/qdev/files/LongitudinalMeasurementInvariance_2017_1108.html

# Configural invariance model
configural_search <- '
# Define the latent factors
search1 =~ NA*ml01_02_t1 + lambda1*ml01_02_t1 + ml01_03_t1 + ml01_07_t1 + ml01_08_t1 + ml01_10_t1
search2 =~ NA*ml01_02_t2 + lambda1*ml01_02_t2 + ml01_03_t2 + ml01_07_t2 + ml01_08_t2 + ml01_10_t2

# Intercepts
ml01_02_t1 ~ i1*1
ml01_03_t1 ~ 1
ml01_07_t1 ~ 1
ml01_08_t1 ~ 1
ml01_10_t1 ~ 1

ml01_02_t2 ~ i1*1
ml01_03_t2 ~ 1
ml01_07_t2 ~ 1
ml01_08_t2 ~ 1
ml01_10_t2 ~ 1

# Unique Variances
ml01_02_t1 ~~ ml01_02_t1
ml01_03_t1 ~~ ml01_03_t1
ml01_07_t1 ~~ ml01_07_t1
ml01_08_t1 ~~ ml01_08_t1
ml01_10_t1 ~~ ml01_10_t1

ml01_02_t2 ~~ ml01_02_t2
ml01_03_t2 ~~ ml01_03_t2
ml01_07_t2 ~~ ml01_07_t2
ml01_08_t2 ~~ ml01_08_t2
ml01_10_t2 ~~ ml01_10_t2

# Latent Variable Means
search1 ~ 0*1
search2 ~ 1

# Latent Variable Variances and Covariance
search1 ~~ 1*search1
search2 ~~ search2
search1 ~~ search2
'
fit_configural_search <- cfa(configural_search, data = df_sbsa3_wide_wb, mimic = "mplus", missing="ML")
summary(fit_configural_search, fit.measures = TRUE)

# Weak invariance model
weak_search <- '
# Define the latent factors
search1 =~ NA*ml01_02_t1 + lambda1*ml01_02_t1 + lambda4*ml01_03_t1 + lambda5*ml01_07_t1 + lambda6*ml01_08_t1 + lambda9*ml01_10_t1
search2 =~ NA*ml01_02_t2 + lambda1*ml01_02_t2 + lambda4*ml01_03_t2 + lambda5*ml01_07_t2 + lambda6*ml01_08_t2 + lambda9*ml01_10_t2

# Intercepts
ml01_02_t1 ~ i1*1
ml01_03_t1 ~ 1
ml01_07_t1 ~ 1
ml01_08_t1 ~ 1
ml01_10_t1 ~ 1

ml01_02_t2 ~ i1*1
ml01_03_t2 ~ 1
ml01_07_t2 ~ 1
ml01_08_t2 ~ 1
ml01_10_t2 ~ 1

# Unique Variances
ml01_02_t1 ~~ ml01_02_t1
ml01_03_t1 ~~ ml01_03_t1
ml01_07_t1 ~~ ml01_07_t1
ml01_08_t1 ~~ ml01_08_t1
ml01_10_t1 ~~ ml01_10_t1

ml01_02_t2 ~~ ml01_02_t2
ml01_03_t2 ~~ ml01_03_t2
ml01_07_t2 ~~ ml01_07_t2
ml01_08_t2 ~~ ml01_08_t2
ml01_10_t2 ~~ ml01_10_t2

# Latent Variable Means
search1 ~ 0*1
search2 ~ 1

# Latent Variable Variances and Covariance
search1 ~~ 1*search1
search2 ~~ search2
search1 ~~ search2
'
fit_weak_search <- cfa(weak_search, data = df_sbsa3_wide_wb, mimic = "mplus", missing="ML")
summary(fit_weak_search, fit.measures = TRUE)

# Strong invariance model (additional constraints on manifest intercepts)
strong_search <- '
# Define the latent factors
search1 =~ NA*ml01_02_t1 + lambda1*ml01_02_t1 + lambda4*ml01_03_t1 + lambda5*ml01_07_t1 + lambda6*ml01_08_t1 + lambda9*ml01_10_t1
search2 =~ NA*ml01_02_t2 + lambda1*ml01_02_t2 + lambda4*ml01_03_t2 + lambda5*ml01_07_t2 + lambda6*ml01_08_t2 + lambda9*ml01_10_t2

# Intercepts
ml01_02_t1 ~ i1*1
ml01_03_t1 ~ i2*1
ml01_07_t1 ~ i3*1
ml01_08_t1 ~ i4*1
ml01_10_t1 ~ i5*1

ml01_02_t2 ~ i1*1
ml01_03_t2 ~ i2*1
ml01_07_t2 ~ i3*1
ml01_08_t2 ~ i4*1
ml01_10_t2 ~ i5*1

# Unique Variances
ml01_02_t1 ~~ ml01_02_t1
ml01_03_t1 ~~ ml01_03_t1
ml01_07_t1 ~~ ml01_07_t1
ml01_08_t1 ~~ ml01_08_t1
ml01_10_t1 ~~ ml01_10_t1

ml01_02_t2 ~~ ml01_02_t2
ml01_03_t2 ~~ ml01_03_t2
ml01_07_t2 ~~ ml01_07_t2
ml01_08_t2 ~~ ml01_08_t2
ml01_10_t2 ~~ ml01_10_t2

# Latent Variable Means
search1 ~ 0*1
search2 ~ 1

# Latent Variable Variances and Covariance
search1 ~~ 1*search1
search2 ~~ search2
search1 ~~ search2
'
fit_strong_search <- cfa(strong_search, data = df_sbsa3_wide_wb, mimic = "mplus", missing="ML")
summary(fit_strong_search, fit.measures = TRUE)

# Strict invariance model (additional constraints on manifest variances)
strict_search <- '
# Define the latent factors
search1 =~ NA*ml01_02_t1 + lambda1*ml01_02_t1 + lambda4*ml01_03_t1 + lambda5*ml01_07_t1 + lambda6*ml01_08_t1 + lambda9*ml01_10_t1
search2 =~ NA*ml01_02_t2 + lambda1*ml01_02_t2 + lambda4*ml01_03_t2 + lambda5*ml01_07_t2 + lambda6*ml01_08_t2 + lambda9*ml01_10_t2

# Intercepts
ml01_02_t1 ~ i1*1
ml01_03_t1 ~ i2*1
ml01_07_t1 ~ i3*1
ml01_08_t1 ~ i4*1
ml01_10_t1 ~ i5*1

ml01_02_t2 ~ i1*1
ml01_03_t2 ~ i2*1
ml01_07_t2 ~ i3*1
ml01_08_t2 ~ i4*1
ml01_10_t2 ~ i5*1

# Unique Variances
ml01_02_t1 ~~ u1*ml01_02_t1
ml01_03_t1 ~~ u2*ml01_03_t1
ml01_07_t1 ~~ u3*ml01_07_t1
ml01_08_t1 ~~ u4*ml01_08_t1
ml01_10_t1 ~~ u5*ml01_10_t1

ml01_02_t2 ~~ u1*ml01_02_t2
ml01_03_t2 ~~ u2*ml01_03_t2
ml01_07_t2 ~~ u3*ml01_07_t2
ml01_08_t2 ~~ u4*ml01_08_t2
ml01_10_t2 ~~ u5*ml01_10_t2

# Latent Variable Means
search1 ~ 0*1
search2 ~ 1

# Latent Variable Variances and Covariance
search1 ~~ 1*search1
search2 ~~ search2
search1 ~~ search2
'
fit_strict_search <- cfa(strict_search, data = df_sbsa3_wide_wb, mimic = "mplus", missing="ML")
summary(fit_strict_search, fit.measures = TRUE)

Results summary:

# compare model fit
bind_rows(broom::glance(fit_configural_search) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_weak_search) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_strong_search) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_strict_search) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr)) %>% 
  mutate(model = c("configural", "weak", "strong", "strict")) %>% 
  select(model, everything())
# A tibble: 4 × 10
  model       nobs  npar chisq    AIC    BIC   cfi   tli  rmsea   srmr
  <chr>      <int> <dbl> <dbl>  <dbl>  <dbl> <dbl> <dbl>  <dbl>  <dbl>
1 configural   530    31  252. 15343. 15475. 0.947 0.930 0.110  0.0302
2 weak         530    27  254. 15337. 15453. 0.947 0.938 0.104  0.0332
3 strong       530    23  257. 15332. 15430. 0.948 0.944 0.0982 0.0342
4 strict       530    18  267. 15332. 15409. 0.947 0.949 0.0939 0.0376
# chi-square difference test for nested models 
anova(fit_configural_search, fit_weak_search)

Chi-Squared Difference Test

                      Df   AIC   BIC  Chisq Chisq diff RMSEA Df diff Pr(>Chisq)
fit_configural_search 34 15343 15475 251.59                                    
fit_weak_search       38 15337 15453 254.27     2.6781     0       4     0.6131
anova(fit_weak_search, fit_strong_search)

Chi-Squared Difference Test

                  Df   AIC   BIC  Chisq Chisq diff RMSEA Df diff Pr(>Chisq)
fit_weak_search   38 15337 15453 254.27                                    
fit_strong_search 42 15332 15430 256.81     2.5436     0       4     0.6368
anova(fit_strong_search, fit_strict_search)

Chi-Squared Difference Test

                  Df   AIC   BIC  Chisq Chisq diff    RMSEA Df diff Pr(>Chisq)
fit_strong_search 42 15332 15430 256.81                                       
fit_strict_search 47 15332 15409 266.81     9.9916 0.043401       5    0.07547
                   
fit_strong_search  
fit_strict_search .
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Good model fit across all model specifications (except for RMSEA but almost…). Chi^2 tests indicate that strict measurement invariance is given.

4.1.3 Self-esteem

Fit model:

Show the code
# Code snippets adapted from https://quantdev.ssri.psu.edu/sites/qdev/files/LongitudinalMeasurementInvariance_2017_1108.html

# Configural invariance model
configural_selfes <- '
# Define the latent factors
selfes1 =~ NA*selfes_par1_t1 + lambda1*selfes_par1_t1 + selfes_par2_t1 + selfes_par3_t1
selfes2 =~ NA*selfes_par1_t2 + lambda1*selfes_par1_t2 + selfes_par2_t2 + selfes_par3_t2

# Intercepts
selfes_par1_t1 ~ i1*1
selfes_par2_t1 ~ 1
selfes_par3_t1 ~ 1

selfes_par1_t2 ~ i1*1
selfes_par2_t2 ~ 1
selfes_par3_t2 ~ 1

# Unique Variances
selfes_par1_t1 ~~ selfes_par1_t1
selfes_par2_t1 ~~ selfes_par2_t1
selfes_par3_t1 ~~ selfes_par3_t1

selfes_par1_t2 ~~ selfes_par1_t2
selfes_par2_t2 ~~ selfes_par2_t2
selfes_par3_t2 ~~ selfes_par3_t2

# Latent Variable Means
selfes1 ~ 0*1
selfes2 ~ 1

# Latent Variable Variances and Covariance
selfes1 ~~ 1*selfes1
selfes2 ~~ selfes2
selfes1 ~~ selfes2
'
fit_configural_selfes <- cfa(configural_selfes, data = df_sbsa3_wide_wb, mimic = "mplus", missing="ML")
summary(fit_configural_selfes, fit.measures = TRUE)

# Weak invariance model
weak_selfes <- '
# Define the latent factors
selfes1 =~ NA*selfes_par1_t1 + lambda1*selfes_par1_t1 + lambda2*selfes_par2_t1 + lambda3*selfes_par3_t1
selfes2 =~ NA*selfes_par1_t2 + lambda1*selfes_par1_t2 + lambda2*selfes_par2_t2 + lambda3*selfes_par3_t2

# Intercepts
selfes_par1_t1 ~ i1*1
selfes_par2_t1 ~ 1
selfes_par3_t1 ~ 1

selfes_par1_t2 ~ i1*1
selfes_par2_t2 ~ 1
selfes_par3_t2 ~ 1

# Unique Variances
selfes_par1_t1 ~~ selfes_par1_t1
selfes_par2_t1 ~~ selfes_par2_t1
selfes_par3_t1 ~~ selfes_par3_t1

selfes_par1_t2 ~~ selfes_par1_t2
selfes_par2_t2 ~~ selfes_par2_t2
selfes_par3_t2 ~~ selfes_par3_t2

# Latent Variable Means
selfes1 ~ 0*1
selfes2 ~ 1

# Latent Variable Variances and Covariance
selfes1 ~~ 1*selfes1
selfes2 ~~ selfes2
selfes1 ~~ selfes2
'
fit_weak_selfes <- cfa(weak_selfes, data = df_sbsa3_wide_wb, mimic = "mplus", missing="ML")
summary(fit_weak_selfes, fit.measures = TRUE)

# Strong invariance model (additional constraints on manifest intercepts)
strong_selfes <- '
# Define the latent factors
selfes1 =~ NA*selfes_par1_t1 + lambda1*selfes_par1_t1 + lambda2*selfes_par2_t1 + lambda3*selfes_par3_t1
selfes2 =~ NA*selfes_par1_t2 + lambda1*selfes_par1_t2 + lambda2*selfes_par2_t2 + lambda3*selfes_par3_t2

# Intercepts
selfes_par1_t1 ~ i1*1
selfes_par2_t1 ~ i2*1
selfes_par3_t1 ~ i3*1

selfes_par1_t2 ~ i1*1
selfes_par2_t2 ~ i2*1
selfes_par3_t2 ~ i3*1

# Unique Variances
selfes_par1_t1 ~~ selfes_par1_t1
selfes_par2_t1 ~~ selfes_par2_t1
selfes_par3_t1 ~~ selfes_par3_t1

selfes_par1_t2 ~~ selfes_par1_t2
selfes_par2_t2 ~~ selfes_par2_t2
selfes_par3_t2 ~~ selfes_par3_t2

# Latent Variable Means
selfes1 ~ 0*1
selfes2 ~ 1

# Latent Variable Variances and Covariance
selfes1 ~~ 1*selfes1
selfes2 ~~ selfes2
selfes1 ~~ selfes2
'
fit_strong_selfes <- cfa(strong_selfes, data = df_sbsa3_wide_wb, mimic = "mplus", missing="ML")
summary(fit_strong_selfes, fit.measures = TRUE)

# Strict invariance model (additional constraints on manifest variances)
strict_selfes <- '
# Define the latent factors
selfes1 =~ NA*selfes_par1_t1 + lambda1*selfes_par1_t1 + lambda2*selfes_par2_t1 + lambda3*selfes_par3_t1
selfes2 =~ NA*selfes_par1_t2 + lambda1*selfes_par1_t2 + lambda2*selfes_par2_t2 + lambda3*selfes_par3_t2

# Intercepts
selfes_par1_t1 ~ i1*1
selfes_par2_t1 ~ i2*1
selfes_par3_t1 ~ i3*1

selfes_par1_t2 ~ i1*1
selfes_par2_t2 ~ i2*1
selfes_par3_t2 ~ i3*1

# Unique Variances
selfes_par1_t1 ~~ u1*selfes_par1_t1
selfes_par2_t1 ~~ u2*selfes_par2_t1
selfes_par3_t1 ~~ u3*selfes_par3_t1

selfes_par1_t2 ~~ u1*selfes_par1_t2
selfes_par2_t2 ~~ u2*selfes_par2_t2
selfes_par3_t2 ~~ u3*selfes_par3_t2

# Latent Variable Means
selfes1 ~ 0*1
selfes2 ~ 1

# Latent Variable Variances and Covariance
selfes1 ~~ 1*selfes1
selfes2 ~~ selfes2
selfes1 ~~ selfes2
'
fit_strict_selfes <- cfa(strict_selfes, data = df_sbsa3_wide_wb, mimic = "mplus", missing="ML")
summary(fit_strict_selfes, fit.measures = TRUE)

Results summary:

# compare model fit
bind_rows(broom::glance(fit_configural_selfes) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_weak_selfes) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_strong_selfes) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_strict_selfes) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr)) %>% 
  mutate(model = c("configural", "weak", "strong", "strict")) %>% 
  select(model, everything())
# A tibble: 4 × 10
  model       nobs  npar chisq   AIC   BIC   cfi   tli rmsea   srmr
  <chr>      <int> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>  <dbl>
1 configural   530    19  164. 5667. 5748. 0.945 0.897 0.192 0.0275
2 weak         530    17  164. 5663. 5736. 0.946 0.919 0.170 0.0278
3 strong       530    15  164. 5660. 5724. 0.946 0.933 0.155 0.0282
4 strict       530    12  167. 5656. 5708. 0.946 0.946 0.138 0.0311
# chi-square difference test for nested models 
anova(fit_configural_selfes, fit_weak_selfes)

Chi-Squared Difference Test

                      Df    AIC    BIC  Chisq Chisq diff RMSEA Df diff
fit_configural_selfes  8 5667.1 5748.2 163.71                         
fit_weak_selfes       10 5663.1 5735.8 163.79   0.081813     0       2
                      Pr(>Chisq)
fit_configural_selfes           
fit_weak_selfes           0.9599
anova(fit_weak_selfes, fit_strong_selfes)

Chi-Squared Difference Test

                  Df    AIC    BIC  Chisq Chisq diff RMSEA Df diff Pr(>Chisq)
fit_weak_selfes   10 5663.1 5735.8 163.79                                    
fit_strong_selfes 12 5659.5 5723.6 164.20    0.40974     0       2     0.8148
anova(fit_strong_selfes, fit_strict_selfes)

Chi-Squared Difference Test

                  Df    AIC    BIC  Chisq Chisq diff RMSEA Df diff Pr(>Chisq)
fit_strong_selfes 12 5659.5 5723.6 164.20                                    
fit_strict_selfes 15 5656.3 5707.6 166.98     2.7759     0       3     0.4275

Good model fit across all model specifications (except for RMSEA). Chi^2 tests indicate that strict measurement invariance is given.

4.1.4 Self concept clarity

Fit model:

Show the code
# Code snippets adapted from https://quantdev.ssri.psu.edu/sites/qdev/files/LongitudinalMeasurementInvariance_2017_1108.html

# Configural invariance model
configural_concept <- '
# Define the latent factors
concept1 =~ NA*concept_par1_t1 + lambda1*concept_par1_t1 + concept_par2_t1 + concept_par3_t1
concept2 =~ NA*concept_par1_t2 + lambda1*concept_par1_t2 + concept_par2_t2 + concept_par3_t2

# Intercepts
concept_par1_t1 ~ i1*1
concept_par2_t1 ~ 1
concept_par3_t1 ~ 1

concept_par1_t2 ~ i1*1
concept_par2_t2 ~ 1
concept_par3_t2 ~ 1

# Unique Variances
concept_par1_t1 ~~ concept_par1_t1
concept_par2_t1 ~~ concept_par2_t1
concept_par3_t1 ~~ concept_par3_t1

concept_par1_t2 ~~ concept_par1_t2
concept_par2_t2 ~~ concept_par2_t2
concept_par3_t2 ~~ concept_par3_t2

# Latent Variable Means
concept1 ~ 0*1
concept2 ~ 1

# Latent Variable Variances and Covariance
concept1 ~~ 1*concept1
concept2 ~~ concept2
concept1 ~~ concept2
'
fit_configural_concept <- cfa(configural_concept, data = df_sbsa3_wide_wb, mimic = "mplus", missing="ML")
summary(fit_configural_concept, fit.measures = TRUE)

# Weak invariance model
weak_concept <- '
# Define the latent factors
concept1 =~ NA*concept_par1_t1 + lambda1*concept_par1_t1 + lambda2*concept_par2_t1 + lambda3*concept_par3_t1
concept2 =~ NA*concept_par1_t2 + lambda1*concept_par1_t2 + lambda2*concept_par2_t2 + lambda3*concept_par3_t2

# Intercepts
concept_par1_t1 ~ i1*1
concept_par2_t1 ~ 1
concept_par3_t1 ~ 1

concept_par1_t2 ~ i1*1
concept_par2_t2 ~ 1
concept_par3_t2 ~ 1

# Unique Variances
concept_par1_t1 ~~ concept_par1_t1
concept_par2_t1 ~~ concept_par2_t1
concept_par3_t1 ~~ concept_par3_t1

concept_par1_t2 ~~ concept_par1_t2
concept_par2_t2 ~~ concept_par2_t2
concept_par3_t2 ~~ concept_par3_t2

# Latent Variable Means
concept1 ~ 0*1
concept2 ~ 1

# Latent Variable Variances and Covariance
concept1 ~~ 1*concept1
concept2 ~~ concept2
concept1 ~~ concept2
'
fit_weak_concept <- cfa(weak_concept, data = df_sbsa3_wide_wb, mimic = "mplus", missing="ML")
summary(fit_weak_concept, fit.measures = TRUE)

# Strong invariance model (additional constraints on manifest intercepts)
strong_concept <- '
# Define the latent factors
concept1 =~ NA*concept_par1_t1 + lambda1*concept_par1_t1 + lambda2*concept_par2_t1 + lambda3*concept_par3_t1
concept2 =~ NA*concept_par1_t2 + lambda1*concept_par1_t2 + lambda2*concept_par2_t2 + lambda3*concept_par3_t2

# Intercepts
concept_par1_t1 ~ i1*1
concept_par2_t1 ~ i2*1
concept_par3_t1 ~ i3*1

concept_par1_t2 ~ i1*1
concept_par2_t2 ~ i2*1
concept_par3_t2 ~ i3*1

# Unique Variances
concept_par1_t1 ~~ concept_par1_t1
concept_par2_t1 ~~ concept_par2_t1
concept_par3_t1 ~~ concept_par3_t1

concept_par1_t2 ~~ concept_par1_t2
concept_par2_t2 ~~ concept_par2_t2
concept_par3_t2 ~~ concept_par3_t2

# Latent Variable Means
concept1 ~ 0*1
concept2 ~ 1

# Latent Variable Variances and Covariance
concept1 ~~ 1*concept1
concept2 ~~ concept2
concept1 ~~ concept2
'
fit_strong_concept <- cfa(strong_concept, data = df_sbsa3_wide_wb, mimic = "mplus", missing="ML")
summary(fit_strong_concept, fit.measures = TRUE)

# Strict invariance model (additional constraints on manifest variances)
strict_concept <- '
# Define the latent factors
concept1 =~ NA*concept_par1_t1 + lambda1*concept_par1_t1 + lambda2*concept_par2_t1 + lambda3*concept_par3_t1
concept2 =~ NA*concept_par1_t2 + lambda1*concept_par1_t2 + lambda2*concept_par2_t2 + lambda3*concept_par3_t2

# Intercepts
concept_par1_t1 ~ i1*1
concept_par2_t1 ~ i2*1
concept_par3_t1 ~ i3*1

concept_par1_t2 ~ i1*1
concept_par2_t2 ~ i2*1
concept_par3_t2 ~ i3*1

# Unique Variances
concept_par1_t1 ~~ u1*concept_par1_t1
concept_par2_t1 ~~ u2*concept_par2_t1
concept_par3_t1 ~~ u3*concept_par3_t1

concept_par1_t2 ~~ u1*concept_par1_t2
concept_par2_t2 ~~ u2*concept_par2_t2
concept_par3_t2 ~~ u3*concept_par3_t2

# Latent Variable Means
concept1 ~ 0*1
concept2 ~ 1

# Latent Variable Variances and Covariance
concept1 ~~ 1*concept1
concept2 ~~ concept2
concept1 ~~ concept2
'
fit_strict_concept <- cfa(strict_concept, data = df_sbsa3_wide_wb, mimic = "mplus", missing="ML")
summary(fit_strict_concept, fit.measures = TRUE)

Results summary:

# compare model fit
bind_rows(broom::glance(fit_configural_concept) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_weak_concept) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_strong_concept) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_strict_concept) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr)) %>% 
  mutate(model = c("configural", "weak", "strong", "strict")) %>% 
  select(model, everything())
# A tibble: 4 × 10
  model       nobs  npar chisq   AIC   BIC   cfi   tli rmsea   srmr
  <chr>      <int> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>  <dbl>
1 configural   530    19  139. 5953. 6034. 0.948 0.903 0.176 0.0253
2 weak         530    17  140. 5950. 6023. 0.949 0.923 0.157 0.0290
3 strong       530    15  140. 5946. 6010. 0.949 0.937 0.142 0.0294
4 strict       530    12  143. 5944. 5995. 0.949 0.949 0.127 0.0394
# chi-square difference test for nested models 
anova(fit_configural_concept, fit_weak_concept)

Chi-Squared Difference Test

                       Df    AIC    BIC  Chisq Chisq diff RMSEA Df diff
fit_configural_concept  8 5953.2 6034.4 138.71                         
fit_weak_concept       10 5950.3 6023.0 139.81     1.1045     0       2
                       Pr(>Chisq)
fit_configural_concept           
fit_weak_concept           0.5756
anova(fit_weak_concept, fit_strong_concept)

Chi-Squared Difference Test

                   Df    AIC    BIC  Chisq Chisq diff RMSEA Df diff Pr(>Chisq)
fit_weak_concept   10 5950.3 6023.0 139.81                                    
fit_strong_concept 12 5946.4 6010.5 139.86   0.052991     0       2     0.9739
anova(fit_strong_concept, fit_strict_concept)

Chi-Squared Difference Test

                   Df    AIC    BIC  Chisq Chisq diff    RMSEA Df diff
fit_strong_concept 12 5946.4 6010.5 139.86                            
fit_strict_concept 15 5943.8 5995.1 143.27     3.4048 0.015956       3
                   Pr(>Chisq)
fit_strong_concept           
fit_strict_concept     0.3333

Good model fit across all model specifications (except for RMSEA). Chi^2 tests indicate that strict measurement invariance is given.

4.2 Big Five

Testing for measurement invariance of the latent BFI-2 traits across time.

4.2.1 Extraversion: current personality

Fit model:

Show the code
# Code snippets adapted from https://quantdev.ssri.psu.edu/sites/qdev/files/LongitudinalMeasurementInvariance_2017_1108.html

# Configural invariance model
configural_extra_curr <- '
# Define the latent factors
extra_curr1 =~ NA*extra_curr_par1_t1 + lambda1*extra_curr_par1_t1 + extra_curr_par2_t1 + extra_curr_par3_t1
extra_curr2 =~ NA*extra_curr_par1_t2 + lambda1*extra_curr_par1_t2 + extra_curr_par2_t2 + extra_curr_par3_t2

# Intercepts
extra_curr_par1_t1 ~ i1*1
extra_curr_par2_t1 ~ 1
extra_curr_par3_t1 ~ 1

extra_curr_par1_t2 ~ i1*1
extra_curr_par2_t2 ~ 1
extra_curr_par3_t2 ~ 1

# Unique Variances
extra_curr_par1_t1 ~~ extra_curr_par1_t1
extra_curr_par2_t1 ~~ extra_curr_par2_t1
extra_curr_par3_t1 ~~ extra_curr_par3_t1

extra_curr_par1_t2 ~~ extra_curr_par1_t2
extra_curr_par2_t2 ~~ extra_curr_par2_t2
extra_curr_par3_t2 ~~ extra_curr_par3_t2

# Latent Variable Means
extra_curr1 ~ 0*1
extra_curr2 ~ 1

# Latent Variable Variances and Covariance
extra_curr1 ~~ 1*extra_curr1
extra_curr2 ~~ extra_curr2
extra_curr1 ~~ extra_curr2
'
fit_configural_extra_curr <- cfa(configural_extra_curr, data = df_sbsa3_wide_pers, mimic = "mplus", missing="ML")
summary(fit_configural_extra_curr, fit.measures = TRUE)

# Weak invariance model
weak_extra_curr <- '
# Define the latent factors
extra_curr1 =~ NA*extra_curr_par1_t1 + lambda1*extra_curr_par1_t1 + lambda2*extra_curr_par2_t1 + lambda3*extra_curr_par3_t1
extra_curr2 =~ NA*extra_curr_par1_t2 + lambda1*extra_curr_par1_t2 + lambda2*extra_curr_par2_t2 + lambda3*extra_curr_par3_t2

# Intercepts
extra_curr_par1_t1 ~ i1*1
extra_curr_par2_t1 ~ 1
extra_curr_par3_t1 ~ 1

extra_curr_par1_t2 ~ i1*1
extra_curr_par2_t2 ~ 1
extra_curr_par3_t2 ~ 1

# Unique Variances
extra_curr_par1_t1 ~~ extra_curr_par1_t1
extra_curr_par2_t1 ~~ extra_curr_par2_t1
extra_curr_par3_t1 ~~ extra_curr_par3_t1

extra_curr_par1_t2 ~~ extra_curr_par1_t2
extra_curr_par2_t2 ~~ extra_curr_par2_t2
extra_curr_par3_t2 ~~ extra_curr_par3_t2

# Latent Variable Means
extra_curr1 ~ 0*1
extra_curr2 ~ 1

# Latent Variable Variances and Covariance
extra_curr1 ~~ 1*extra_curr1
extra_curr2 ~~ extra_curr2
extra_curr1 ~~ extra_curr2
'
fit_weak_extra_curr <- cfa(weak_extra_curr, data = df_sbsa3_wide_pers, mimic = "mplus", missing="ML")
summary(fit_weak_extra_curr, fit.measures = TRUE)

# Strong invariance model (additional constraints on manifest intercepts)
strong_extra_curr <- '
# Define the latent factors
extra_curr1 =~ NA*extra_curr_par1_t1 + lambda1*extra_curr_par1_t1 + lambda2*extra_curr_par2_t1 + lambda3*extra_curr_par3_t1
extra_curr2 =~ NA*extra_curr_par1_t2 + lambda1*extra_curr_par1_t2 + lambda2*extra_curr_par2_t2 + lambda3*extra_curr_par3_t2

# Intercepts
extra_curr_par1_t1 ~ i1*1
extra_curr_par2_t1 ~ i2*1
extra_curr_par3_t1 ~ i3*1

extra_curr_par1_t2 ~ i1*1
extra_curr_par2_t2 ~ i2*1
extra_curr_par3_t2 ~ i3*1

# Unique Variances
extra_curr_par1_t1 ~~ extra_curr_par1_t1
extra_curr_par2_t1 ~~ extra_curr_par2_t1
extra_curr_par3_t1 ~~ extra_curr_par3_t1

extra_curr_par1_t2 ~~ extra_curr_par1_t2
extra_curr_par2_t2 ~~ extra_curr_par2_t2
extra_curr_par3_t2 ~~ extra_curr_par3_t2

# Latent Variable Means
extra_curr1 ~ 0*1
extra_curr2 ~ 1

# Latent Variable Variances and Covariance
extra_curr1 ~~ 1*extra_curr1
extra_curr2 ~~ extra_curr2
extra_curr1 ~~ extra_curr2
'
fit_strong_extra_curr <- cfa(strong_extra_curr, data = df_sbsa3_wide_pers, mimic = "mplus", missing="ML")
summary(fit_strong_extra_curr, fit.measures = TRUE)

# Strict invariance model (additional constraints on manifest variances)
strict_extra_curr <- '
# Define the latent factors
extra_curr1 =~ NA*extra_curr_par1_t1 + lambda1*extra_curr_par1_t1 + lambda2*extra_curr_par2_t1 + lambda3*extra_curr_par3_t1
extra_curr2 =~ NA*extra_curr_par1_t2 + lambda1*extra_curr_par1_t2 + lambda2*extra_curr_par2_t2 + lambda3*extra_curr_par3_t2

# Intercepts
extra_curr_par1_t1 ~ i1*1
extra_curr_par2_t1 ~ i2*1
extra_curr_par3_t1 ~ i3*1

extra_curr_par1_t2 ~ i1*1
extra_curr_par2_t2 ~ i2*1
extra_curr_par3_t2 ~ i3*1

# Unique Variances
extra_curr_par1_t1 ~~ u1*extra_curr_par1_t1
extra_curr_par2_t1 ~~ u2*extra_curr_par2_t1
extra_curr_par3_t1 ~~ u3*extra_curr_par3_t1

extra_curr_par1_t2 ~~ u1*extra_curr_par1_t2
extra_curr_par2_t2 ~~ u2*extra_curr_par2_t2
extra_curr_par3_t2 ~~ u3*extra_curr_par3_t2

# Latent Variable Means
extra_curr1 ~ 0*1
extra_curr2 ~ 1

# Latent Variable Variances and Covariance
extra_curr1 ~~ 1*extra_curr1
extra_curr2 ~~ extra_curr2
extra_curr1 ~~ extra_curr2
'
fit_strict_extra_curr <- cfa(strict_extra_curr, data = df_sbsa3_wide_pers, mimic = "mplus", missing="ML")
summary(fit_strict_extra_curr, fit.measures = TRUE)

Results summary:

# compare model fit
bind_rows(broom::glance(fit_configural_extra_curr) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_weak_extra_curr) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_strong_extra_curr) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_strict_extra_curr) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr)) %>% 
  mutate(model = c("configural", "weak", "strong", "strict")) %>% 
  select(model, everything())
# A tibble: 4 × 10
  model       nobs  npar chisq   AIC   BIC   cfi   tli rmsea   srmr
  <chr>      <int> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>  <dbl>
1 configural   530    19  216. 5133. 5214. 0.919 0.848 0.222 0.0350
2 weak         530    17  218. 5130. 5203. 0.919 0.879 0.198 0.0390
3 strong       530    15  218. 5127. 5191. 0.920 0.899 0.180 0.0385
4 strict       530    12  221. 5124. 5175. 0.920 0.920 0.161 0.0409
# chi-square difference test for nested models 
anova(fit_configural_extra_curr, fit_weak_extra_curr)

Chi-Squared Difference Test

                          Df    AIC    BIC  Chisq Chisq diff RMSEA Df diff
fit_configural_extra_curr  8 5132.8 5214.0 216.27                         
fit_weak_extra_curr       10 5130.1 5202.7 217.54     1.2659     0       2
                          Pr(>Chisq)
fit_configural_extra_curr           
fit_weak_extra_curr            0.531
anova(fit_weak_extra_curr, fit_strong_extra_curr)

Chi-Squared Difference Test

                      Df    AIC    BIC  Chisq Chisq diff RMSEA Df diff
fit_weak_extra_curr   10 5130.1 5202.7 217.54                         
fit_strong_extra_curr 12 5127.0 5191.1 218.47    0.92766     0       2
                      Pr(>Chisq)
fit_weak_extra_curr             
fit_strong_extra_curr     0.6289
anova(fit_strong_extra_curr, fit_strict_extra_curr)

Chi-Squared Difference Test

                      Df    AIC    BIC  Chisq Chisq diff RMSEA Df diff
fit_strong_extra_curr 12 5127.0 5191.1 218.47                         
fit_strict_extra_curr 15 5123.8 5175.1 221.23     2.7637     0       3
                      Pr(>Chisq)
fit_strong_extra_curr           
fit_strict_extra_curr     0.4295

Model fit satisfactory, except for RMSEA. Chi^2 tests indicate that strict measurement invariance is given.

4.2.2 Extraversion: ideal personality

Fit model:

Show the code
# Code snippets adapted from https://quantdev.ssri.psu.edu/sites/qdev/files/LongitudinalMeasurementInvariance_2017_1108.html

# Configural invariance model
configural_extra_ideal <- '
# Define the latent factors
extra_ideal1 =~ NA*extra_ideal_par1_t1 + lambda1*extra_ideal_par1_t1 + extra_ideal_par2_t1 + extra_ideal_par3_t1
extra_ideal2 =~ NA*extra_ideal_par1_t2 + lambda1*extra_ideal_par1_t2 + extra_ideal_par2_t2 + extra_ideal_par3_t2

# Intercepts
extra_ideal_par1_t1 ~ i1*1
extra_ideal_par2_t1 ~ 1
extra_ideal_par3_t1 ~ 1

extra_ideal_par1_t2 ~ i1*1
extra_ideal_par2_t2 ~ 1
extra_ideal_par3_t2 ~ 1

# Unique Variances
extra_ideal_par1_t1 ~~ extra_ideal_par1_t1
extra_ideal_par2_t1 ~~ extra_ideal_par2_t1
extra_ideal_par3_t1 ~~ extra_ideal_par3_t1

extra_ideal_par1_t2 ~~ extra_ideal_par1_t2
extra_ideal_par2_t2 ~~ extra_ideal_par2_t2
extra_ideal_par3_t2 ~~ extra_ideal_par3_t2

# Latent Variable Means
extra_ideal1 ~ 0*1
extra_ideal2 ~ 1

# Latent Variable Variances and Covariance
extra_ideal1 ~~ 1*extra_ideal1
extra_ideal2 ~~ extra_ideal2
extra_ideal1 ~~ extra_ideal2
'
fit_configural_extra_ideal <- cfa(configural_extra_ideal, data = df_sbsa3_wide_pers %>% filter(group=="Group 1"), 
                                  mimic = "mplus", missing="ML")
summary(fit_configural_extra_ideal, fit.measures = TRUE)

# Weak invariance model
weak_extra_ideal <- '
# Define the latent factors
extra_ideal1 =~ NA*extra_ideal_par1_t1 + lambda1*extra_ideal_par1_t1 + lambda2*extra_ideal_par2_t1 + lambda3*extra_ideal_par3_t1
extra_ideal2 =~ NA*extra_ideal_par1_t2 + lambda1*extra_ideal_par1_t2 + lambda2*extra_ideal_par2_t2 + lambda3*extra_ideal_par3_t2

# Intercepts
extra_ideal_par1_t1 ~ i1*1
extra_ideal_par2_t1 ~ 1
extra_ideal_par3_t1 ~ 1

extra_ideal_par1_t2 ~ i1*1
extra_ideal_par2_t2 ~ 1
extra_ideal_par3_t2 ~ 1

# Unique Variances
extra_ideal_par1_t1 ~~ extra_ideal_par1_t1
extra_ideal_par2_t1 ~~ extra_ideal_par2_t1
extra_ideal_par3_t1 ~~ extra_ideal_par3_t1

extra_ideal_par1_t2 ~~ extra_ideal_par1_t2
extra_ideal_par2_t2 ~~ extra_ideal_par2_t2
extra_ideal_par3_t2 ~~ extra_ideal_par3_t2

# Latent Variable Means
extra_ideal1 ~ 0*1
extra_ideal2 ~ 1

# Latent Variable Variances and Covariance
extra_ideal1 ~~ 1*extra_ideal1
extra_ideal2 ~~ extra_ideal2
extra_ideal1 ~~ extra_ideal2
'
fit_weak_extra_ideal <- cfa(weak_extra_ideal, data = df_sbsa3_wide_pers %>% filter(group=="Group 1"), 
                            mimic = "mplus", missing="ML")
summary(fit_weak_extra_ideal, fit.measures = TRUE)

# Strong invariance model (additional constraints on manifest intercepts)
strong_extra_ideal <- '
# Define the latent factors
extra_ideal1 =~ NA*extra_ideal_par1_t1 + lambda1*extra_ideal_par1_t1 + lambda2*extra_ideal_par2_t1 + lambda3*extra_ideal_par3_t1
extra_ideal2 =~ NA*extra_ideal_par1_t2 + lambda1*extra_ideal_par1_t2 + lambda2*extra_ideal_par2_t2 + lambda3*extra_ideal_par3_t2

# Intercepts
extra_ideal_par1_t1 ~ i1*1
extra_ideal_par2_t1 ~ i2*1
extra_ideal_par3_t1 ~ i3*1

extra_ideal_par1_t2 ~ i1*1
extra_ideal_par2_t2 ~ i2*1
extra_ideal_par3_t2 ~ i3*1

# Unique Variances
extra_ideal_par1_t1 ~~ extra_ideal_par1_t1
extra_ideal_par2_t1 ~~ extra_ideal_par2_t1
extra_ideal_par3_t1 ~~ extra_ideal_par3_t1

extra_ideal_par1_t2 ~~ extra_ideal_par1_t2
extra_ideal_par2_t2 ~~ extra_ideal_par2_t2
extra_ideal_par3_t2 ~~ extra_ideal_par3_t2

# Latent Variable Means
extra_ideal1 ~ 0*1
extra_ideal2 ~ 1

# Latent Variable Variances and Covariance
extra_ideal1 ~~ 1*extra_ideal1
extra_ideal2 ~~ extra_ideal2
extra_ideal1 ~~ extra_ideal2
'
fit_strong_extra_ideal <- cfa(strong_extra_ideal, data = df_sbsa3_wide_pers %>% filter(group=="Group 1"), 
                              mimic = "mplus", missing="ML")
summary(fit_strong_extra_ideal, fit.measures = TRUE)

# Strict invariance model (additional constraints on manifest variances)
strict_extra_ideal <- '
# Define the latent factors
extra_ideal1 =~ NA*extra_ideal_par1_t1 + lambda1*extra_ideal_par1_t1 + lambda2*extra_ideal_par2_t1 + lambda3*extra_ideal_par3_t1
extra_ideal2 =~ NA*extra_ideal_par1_t2 + lambda1*extra_ideal_par1_t2 + lambda2*extra_ideal_par2_t2 + lambda3*extra_ideal_par3_t2

# Intercepts
extra_ideal_par1_t1 ~ i1*1
extra_ideal_par2_t1 ~ i2*1
extra_ideal_par3_t1 ~ i3*1

extra_ideal_par1_t2 ~ i1*1
extra_ideal_par2_t2 ~ i2*1
extra_ideal_par3_t2 ~ i3*1

# Unique Variances
extra_ideal_par1_t1 ~~ u1*extra_ideal_par1_t1
extra_ideal_par2_t1 ~~ u2*extra_ideal_par2_t1
extra_ideal_par3_t1 ~~ u3*extra_ideal_par3_t1

extra_ideal_par1_t2 ~~ u1*extra_ideal_par1_t2
extra_ideal_par2_t2 ~~ u2*extra_ideal_par2_t2
extra_ideal_par3_t2 ~~ u3*extra_ideal_par3_t2

# Latent Variable Means
extra_ideal1 ~ 0*1
extra_ideal2 ~ 1

# Latent Variable Variances and Covariance
extra_ideal1 ~~ 1*extra_ideal1
extra_ideal2 ~~ extra_ideal2
extra_ideal1 ~~ extra_ideal2
'
fit_strict_extra_ideal <- cfa(strict_extra_ideal, data = df_sbsa3_wide_pers %>% filter(group=="Group 1"), 
                              mimic = "mplus", missing="ML")
summary(fit_strict_extra_ideal, fit.measures = TRUE)

Results summary:

# compare model fit
bind_rows(broom::glance(fit_configural_extra_ideal) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_weak_extra_ideal) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_strong_extra_ideal) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_strict_extra_ideal) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr)) %>% 
  mutate(model = c("configural", "weak", "strong", "strict")) %>% 
  select(model, everything())
# A tibble: 4 × 10
  model       nobs  npar chisq   AIC   BIC   cfi   tli  rmsea   srmr
  <chr>      <int> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>  <dbl>  <dbl>
1 configural   175    19  35.1 1669. 1729. 0.910 0.831 0.139  0.0514
2 weak         175    17  36.5 1666. 1720. 0.912 0.868 0.123  0.0713
3 strong       175    15  37.5 1663. 1711. 0.915 0.894 0.110  0.0772
4 strict       175    12  39.7 1660. 1698. 0.918 0.918 0.0971 0.0985
# chi-square difference test for nested models 
anova(fit_configural_extra_ideal, fit_weak_extra_ideal)

Chi-Squared Difference Test

                           Df    AIC    BIC  Chisq Chisq diff RMSEA Df diff
fit_configural_extra_ideal  8 1669.0 1729.2 35.124                         
fit_weak_extra_ideal       10 1666.4 1720.2 36.522     1.3981     0       2
                           Pr(>Chisq)
fit_configural_extra_ideal           
fit_weak_extra_ideal           0.4971
anova(fit_weak_extra_ideal, fit_strong_extra_ideal)

Chi-Squared Difference Test

                       Df    AIC    BIC  Chisq Chisq diff RMSEA Df diff
fit_weak_extra_ideal   10 1666.4 1720.2 36.522                         
fit_strong_extra_ideal 12 1663.4 1710.9 37.517    0.99445     0       2
                       Pr(>Chisq)
fit_weak_extra_ideal             
fit_strong_extra_ideal     0.6082
anova(fit_strong_extra_ideal, fit_strict_extra_ideal)

Chi-Squared Difference Test

                       Df    AIC    BIC  Chisq Chisq diff RMSEA Df diff
fit_strong_extra_ideal 12 1663.4 1710.9 37.517                         
fit_strict_extra_ideal 15 1659.6 1697.6 39.739     2.2225     0       3
                       Pr(>Chisq)
fit_strong_extra_ideal           
fit_strict_extra_ideal     0.5275

Satisfactory model fit across all stages of measurement invariance (except for RMSEA). Chi^2 tests indicate that strict measurement invariance is given.

4.2.3 Agreeableness: current personality

Fit model:

Show the code
# Code snippets adapted from https://quantdev.ssri.psu.edu/sites/qdev/files/LongitudinalMeasurementInvariance_2017_1108.html

# Configural invariance model
configural_agree_curr <- '
# Define the latent factors
agree_curr1 =~ NA*agree_curr_par1_t1 + lambda1*agree_curr_par1_t1 + agree_curr_par2_t1 + agree_curr_par3_t1
agree_curr2 =~ NA*agree_curr_par1_t2 + lambda1*agree_curr_par1_t2 + agree_curr_par2_t2 + agree_curr_par3_t2

# Intercepts
agree_curr_par1_t1 ~ i1*1
agree_curr_par2_t1 ~ 1
agree_curr_par3_t1 ~ 1

agree_curr_par1_t2 ~ i1*1
agree_curr_par2_t2 ~ 1
agree_curr_par3_t2 ~ 1

# Unique Variances
agree_curr_par1_t1 ~~ agree_curr_par1_t1
agree_curr_par2_t1 ~~ agree_curr_par2_t1
agree_curr_par3_t1 ~~ agree_curr_par3_t1

agree_curr_par1_t2 ~~ agree_curr_par1_t2
agree_curr_par2_t2 ~~ agree_curr_par2_t2
agree_curr_par3_t2 ~~ agree_curr_par3_t2

# Latent Variable Means
agree_curr1 ~ 0*1
agree_curr2 ~ 1

# Latent Variable Variances and Covariance
agree_curr1 ~~ 1*agree_curr1
agree_curr2 ~~ agree_curr2
agree_curr1 ~~ agree_curr2
'
fit_configural_agree_curr <- cfa(configural_agree_curr, data = df_sbsa3_wide_pers, mimic = "mplus", missing="ML")
summary(fit_configural_agree_curr, fit.measures = TRUE)

# Weak invariance model
weak_agree_curr <- '
# Define the latent factors
agree_curr1 =~ NA*agree_curr_par1_t1 + lambda1*agree_curr_par1_t1 + lambda2*agree_curr_par2_t1 + lambda3*agree_curr_par3_t1
agree_curr2 =~ NA*agree_curr_par1_t2 + lambda1*agree_curr_par1_t2 + lambda2*agree_curr_par2_t2 + lambda3*agree_curr_par3_t2

# Intercepts
agree_curr_par1_t1 ~ i1*1
agree_curr_par2_t1 ~ 1
agree_curr_par3_t1 ~ 1

agree_curr_par1_t2 ~ i1*1
agree_curr_par2_t2 ~ 1
agree_curr_par3_t2 ~ 1

# Unique Variances
agree_curr_par1_t1 ~~ agree_curr_par1_t1
agree_curr_par2_t1 ~~ agree_curr_par2_t1
agree_curr_par3_t1 ~~ agree_curr_par3_t1

agree_curr_par1_t2 ~~ agree_curr_par1_t2
agree_curr_par2_t2 ~~ agree_curr_par2_t2
agree_curr_par3_t2 ~~ agree_curr_par3_t2

# Latent Variable Means
agree_curr1 ~ 0*1
agree_curr2 ~ 1

# Latent Variable Variances and Covariance
agree_curr1 ~~ 1*agree_curr1
agree_curr2 ~~ agree_curr2
agree_curr1 ~~ agree_curr2
'
fit_weak_agree_curr <- cfa(weak_agree_curr, data = df_sbsa3_wide_pers, mimic = "mplus", missing="ML")
summary(fit_weak_agree_curr, fit.measures = TRUE)

# Strong invariance model (additional constraints on manifest intercepts)
strong_agree_curr <- '
# Define the latent factors
agree_curr1 =~ NA*agree_curr_par1_t1 + lambda1*agree_curr_par1_t1 + lambda2*agree_curr_par2_t1 + lambda3*agree_curr_par3_t1
agree_curr2 =~ NA*agree_curr_par1_t2 + lambda1*agree_curr_par1_t2 + lambda2*agree_curr_par2_t2 + lambda3*agree_curr_par3_t2

# Intercepts
agree_curr_par1_t1 ~ i1*1
agree_curr_par2_t1 ~ i2*1
agree_curr_par3_t1 ~ i3*1

agree_curr_par1_t2 ~ i1*1
agree_curr_par2_t2 ~ i2*1
agree_curr_par3_t2 ~ i3*1

# Unique Variances
agree_curr_par1_t1 ~~ agree_curr_par1_t1
agree_curr_par2_t1 ~~ agree_curr_par2_t1
agree_curr_par3_t1 ~~ agree_curr_par3_t1

agree_curr_par1_t2 ~~ agree_curr_par1_t2
agree_curr_par2_t2 ~~ agree_curr_par2_t2
agree_curr_par3_t2 ~~ agree_curr_par3_t2

# Latent Variable Means
agree_curr1 ~ 0*1
agree_curr2 ~ 1

# Latent Variable Variances and Covariance
agree_curr1 ~~ 1*agree_curr1
agree_curr2 ~~ agree_curr2
agree_curr1 ~~ agree_curr2
'
fit_strong_agree_curr <- cfa(strong_agree_curr, data = df_sbsa3_wide_pers, mimic = "mplus", missing="ML")
summary(fit_strong_agree_curr, fit.measures = TRUE)

# Strict invariance model (additional constraints on manifest variances)
strict_agree_curr <- '
# Define the latent factors
agree_curr1 =~ NA*agree_curr_par1_t1 + lambda1*agree_curr_par1_t1 + lambda2*agree_curr_par2_t1 + lambda3*agree_curr_par3_t1
agree_curr2 =~ NA*agree_curr_par1_t2 + lambda1*agree_curr_par1_t2 + lambda2*agree_curr_par2_t2 + lambda3*agree_curr_par3_t2

# Intercepts
agree_curr_par1_t1 ~ i1*1
agree_curr_par2_t1 ~ i2*1
agree_curr_par3_t1 ~ i3*1

agree_curr_par1_t2 ~ i1*1
agree_curr_par2_t2 ~ i2*1
agree_curr_par3_t2 ~ i3*1

# Unique Variances
agree_curr_par1_t1 ~~ u1*agree_curr_par1_t1
agree_curr_par2_t1 ~~ u2*agree_curr_par2_t1
agree_curr_par3_t1 ~~ u3*agree_curr_par3_t1

agree_curr_par1_t2 ~~ u1*agree_curr_par1_t2
agree_curr_par2_t2 ~~ u2*agree_curr_par2_t2
agree_curr_par3_t2 ~~ u3*agree_curr_par3_t2

# Latent Variable Means
agree_curr1 ~ 0*1
agree_curr2 ~ 1

# Latent Variable Variances and Covariance
agree_curr1 ~~ 1*agree_curr1
agree_curr2 ~~ agree_curr2
agree_curr1 ~~ agree_curr2
'
fit_strict_agree_curr <- cfa(strict_agree_curr, data = df_sbsa3_wide_pers, mimic = "mplus", missing="ML")
summary(fit_strict_agree_curr, fit.measures = TRUE)

Results summary:

# compare model fit
bind_rows(broom::glance(fit_configural_agree_curr) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_weak_agree_curr) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_strong_agree_curr) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_strict_agree_curr) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr)) %>% 
  mutate(model = c("configural", "weak", "strong", "strict")) %>% 
  select(model, everything())
# A tibble: 4 × 10
  model       nobs  npar chisq   AIC   BIC   cfi   tli rmsea   srmr
  <chr>      <int> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>  <dbl>
1 configural   530    19  241. 5004. 5085. 0.858 0.734 0.235 0.0584
2 weak         530    17  242. 5001. 5073. 0.859 0.789 0.209 0.0605
3 strong       530    15  245. 5000. 5064. 0.859 0.823 0.191 0.0624
4 strict       530    12  253. 5002. 5053. 0.856 0.856 0.173 0.0833
# chi-square difference test for nested models 
anova(fit_configural_agree_curr, fit_weak_agree_curr)

Chi-Squared Difference Test

                          Df    AIC    BIC  Chisq Chisq diff RMSEA Df diff
fit_configural_agree_curr  8 5004.2 5085.4 241.50                         
fit_weak_agree_curr       10 5000.8 5073.5 242.14    0.64558     0       2
                          Pr(>Chisq)
fit_configural_agree_curr           
fit_weak_agree_curr           0.7241
anova(fit_weak_agree_curr, fit_strong_agree_curr)

Chi-Squared Difference Test

                      Df    AIC    BIC  Chisq Chisq diff   RMSEA Df diff
fit_weak_agree_curr   10 5000.8 5073.5 242.14                           
fit_strong_agree_curr 12 4999.6 5063.7 244.89     2.7511 0.02662       2
                      Pr(>Chisq)
fit_weak_agree_curr             
fit_strong_agree_curr     0.2527
anova(fit_strong_agree_curr, fit_strict_agree_curr)

Chi-Squared Difference Test

                      Df    AIC    BIC  Chisq Chisq diff    RMSEA Df diff
fit_strong_agree_curr 12 4999.6 5063.7 244.89                            
fit_strict_agree_curr 15 5001.5 5052.8 252.81     7.9229 0.055643       3
                      Pr(>Chisq)  
fit_strong_agree_curr             
fit_strict_agree_curr    0.04763 *
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Model fit still not satisfactory. Chi^2 tests indicate that strong measurement invariance is given (at p < .05; strict at p < .01).

4.2.4 Agreeableness: ideal personality

Fit model:

Show the code
# Code snippets adapted from https://quantdev.ssri.psu.edu/sites/qdev/files/LongitudinalMeasurementInvariance_2017_1108.html

# Configural invariance model
configural_agree_ideal <- '
# Define the latent factors
agree_ideal1 =~ NA*agree_ideal_par1_t1 + lambda1*agree_ideal_par1_t1 + agree_ideal_par2_t1 + agree_ideal_par3_t1
agree_ideal2 =~ NA*agree_ideal_par1_t2 + lambda1*agree_ideal_par1_t2 + agree_ideal_par2_t2 + agree_ideal_par3_t2

# Intercepts
agree_ideal_par1_t1 ~ i1*1
agree_ideal_par2_t1 ~ 1
agree_ideal_par3_t1 ~ 1

agree_ideal_par1_t2 ~ i1*1
agree_ideal_par2_t2 ~ 1
agree_ideal_par3_t2 ~ 1

# Unique Variances
agree_ideal_par1_t1 ~~ agree_ideal_par1_t1
agree_ideal_par2_t1 ~~ agree_ideal_par2_t1
agree_ideal_par3_t1 ~~ agree_ideal_par3_t1

agree_ideal_par1_t2 ~~ agree_ideal_par1_t2
agree_ideal_par2_t2 ~~ agree_ideal_par2_t2
agree_ideal_par3_t2 ~~ agree_ideal_par3_t2

# Latent Variable Means
agree_ideal1 ~ 0*1
agree_ideal2 ~ 1

# Latent Variable Variances and Covariance
agree_ideal1 ~~ 1*agree_ideal1
agree_ideal2 ~~ agree_ideal2
agree_ideal1 ~~ agree_ideal2
'
fit_configural_agree_ideal <- cfa(configural_agree_ideal, data = df_sbsa3_wide_pers %>% filter(group=="Group 1"), 
                                  mimic = "mplus", missing="ML")
summary(fit_configural_agree_ideal, fit.measures = TRUE)

# Weak invariance model
weak_agree_ideal <- '
# Define the latent factors
agree_ideal1 =~ NA*agree_ideal_par1_t1 + lambda1*agree_ideal_par1_t1 + lambda2*agree_ideal_par2_t1 + lambda3*agree_ideal_par3_t1
agree_ideal2 =~ NA*agree_ideal_par1_t2 + lambda1*agree_ideal_par1_t2 + lambda2*agree_ideal_par2_t2 + lambda3*agree_ideal_par3_t2

# Intercepts
agree_ideal_par1_t1 ~ i1*1
agree_ideal_par2_t1 ~ 1
agree_ideal_par3_t1 ~ 1

agree_ideal_par1_t2 ~ i1*1
agree_ideal_par2_t2 ~ 1
agree_ideal_par3_t2 ~ 1

# Unique Variances
agree_ideal_par1_t1 ~~ agree_ideal_par1_t1
agree_ideal_par2_t1 ~~ agree_ideal_par2_t1
agree_ideal_par3_t1 ~~ agree_ideal_par3_t1

agree_ideal_par1_t2 ~~ agree_ideal_par1_t2
agree_ideal_par2_t2 ~~ agree_ideal_par2_t2
agree_ideal_par3_t2 ~~ agree_ideal_par3_t2

# Latent Variable Means
agree_ideal1 ~ 0*1
agree_ideal2 ~ 1

# Latent Variable Variances and Covariance
agree_ideal1 ~~ 1*agree_ideal1
agree_ideal2 ~~ agree_ideal2
agree_ideal1 ~~ agree_ideal2
'
fit_weak_agree_ideal <- cfa(weak_agree_ideal, data = df_sbsa3_wide_pers %>% filter(group=="Group 1"), mimic = "mplus", missing="ML")
summary(fit_weak_agree_ideal, fit.measures = TRUE)

# Strong invariance model (additional constraints on manifest intercepts)
strong_agree_ideal <- '
# Define the latent factors
agree_ideal1 =~ NA*agree_ideal_par1_t1 + lambda1*agree_ideal_par1_t1 + lambda2*agree_ideal_par2_t1 + lambda3*agree_ideal_par3_t1
agree_ideal2 =~ NA*agree_ideal_par1_t2 + lambda1*agree_ideal_par1_t2 + lambda2*agree_ideal_par2_t2 + lambda3*agree_ideal_par3_t2

# Intercepts
agree_ideal_par1_t1 ~ i1*1
agree_ideal_par2_t1 ~ i2*1
agree_ideal_par3_t1 ~ i3*1

agree_ideal_par1_t2 ~ i1*1
agree_ideal_par2_t2 ~ i2*1
agree_ideal_par3_t2 ~ i3*1

# Unique Variances
agree_ideal_par1_t1 ~~ agree_ideal_par1_t1
agree_ideal_par2_t1 ~~ agree_ideal_par2_t1
agree_ideal_par3_t1 ~~ agree_ideal_par3_t1

agree_ideal_par1_t2 ~~ agree_ideal_par1_t2
agree_ideal_par2_t2 ~~ agree_ideal_par2_t2
agree_ideal_par3_t2 ~~ agree_ideal_par3_t2

# Latent Variable Means
agree_ideal1 ~ 0*1
agree_ideal2 ~ 1

# Latent Variable Variances and Covariance
agree_ideal1 ~~ 1*agree_ideal1
agree_ideal2 ~~ agree_ideal2
agree_ideal1 ~~ agree_ideal2
'
fit_strong_agree_ideal <- cfa(strong_agree_ideal, data = df_sbsa3_wide_pers %>% filter(group=="Group 1"), mimic = "mplus", missing="ML")
summary(fit_strong_agree_ideal, fit.measures = TRUE)

# Strict invariance model (additional constraints on manifest variances)
strict_agree_ideal <- '
# Define the latent factors
agree_ideal1 =~ NA*agree_ideal_par1_t1 + lambda1*agree_ideal_par1_t1 + lambda2*agree_ideal_par2_t1 + lambda3*agree_ideal_par3_t1
agree_ideal2 =~ NA*agree_ideal_par1_t2 + lambda1*agree_ideal_par1_t2 + lambda2*agree_ideal_par2_t2 + lambda3*agree_ideal_par3_t2

# Intercepts
agree_ideal_par1_t1 ~ i1*1
agree_ideal_par2_t1 ~ i2*1
agree_ideal_par3_t1 ~ i3*1

agree_ideal_par1_t2 ~ i1*1
agree_ideal_par2_t2 ~ i2*1
agree_ideal_par3_t2 ~ i3*1

# Unique Variances
agree_ideal_par1_t1 ~~ u1*agree_ideal_par1_t1
agree_ideal_par2_t1 ~~ u2*agree_ideal_par2_t1
agree_ideal_par3_t1 ~~ u3*agree_ideal_par3_t1

agree_ideal_par1_t2 ~~ u1*agree_ideal_par1_t2
agree_ideal_par2_t2 ~~ u2*agree_ideal_par2_t2
agree_ideal_par3_t2 ~~ u3*agree_ideal_par3_t2

# Latent Variable Means
agree_ideal1 ~ 0*1
agree_ideal2 ~ 1

# Latent Variable Variances and Covariance
agree_ideal1 ~~ 1*agree_ideal1
agree_ideal2 ~~ agree_ideal2
agree_ideal1 ~~ agree_ideal2
'
fit_strict_agree_ideal <- cfa(strict_agree_ideal, data = df_sbsa3_wide_pers %>% filter(group=="Group 1"), mimic = "mplus", missing="ML")
summary(fit_strict_agree_ideal, fit.measures = TRUE)

Results summary:

# compare model fit
bind_rows(broom::glance(fit_configural_agree_ideal) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_weak_agree_ideal) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_strong_agree_ideal) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_strict_agree_ideal) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr)) %>% 
  mutate(model = c("configural", "weak", "strong", "strict")) %>% 
  select(model, everything())
# A tibble: 4 × 10
  model       nobs  npar chisq   AIC   BIC   cfi   tli rmsea   srmr
  <chr>      <int> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>  <dbl>
1 configural   175    19  36.4 1606. 1666. 0.923 0.856 0.142 0.0471
2 weak         175    17  38.5 1604. 1658. 0.923 0.884 0.128 0.0786
3 strong       175    15  41.0 1603. 1650. 0.921 0.902 0.118 0.0803
4 strict       175    12  44.2 1600. 1638. 0.921 0.921 0.105 0.100 
# chi-square difference test for nested models 
anova(fit_configural_agree_ideal, fit_weak_agree_ideal)

Chi-Squared Difference Test

                           Df    AIC    BIC  Chisq Chisq diff    RMSEA Df diff
fit_configural_agree_ideal  8 1606.2 1666.3 36.375                            
fit_weak_agree_ideal       10 1604.3 1658.1 38.507     2.1316 0.019388       2
                           Pr(>Chisq)
fit_configural_agree_ideal           
fit_weak_agree_ideal           0.3445
anova(fit_weak_agree_ideal, fit_strong_agree_ideal)

Chi-Squared Difference Test

                       Df    AIC    BIC  Chisq Chisq diff   RMSEA Df diff
fit_weak_agree_ideal   10 1604.3 1658.1 38.507                           
fit_strong_agree_ideal 12 1602.8 1650.3 41.024     2.5174 0.03845       2
                       Pr(>Chisq)
fit_weak_agree_ideal             
fit_strong_agree_ideal      0.284
anova(fit_strong_agree_ideal, fit_strict_agree_ideal)

Chi-Squared Difference Test

                       Df    AIC    BIC  Chisq Chisq diff    RMSEA Df diff
fit_strong_agree_ideal 12 1602.8 1650.3 41.024                            
fit_strict_agree_ideal 15 1600.0 1638.0 44.200     3.1753 0.018273       3
                       Pr(>Chisq)
fit_strong_agree_ideal           
fit_strict_agree_ideal     0.3654

Good model fit across all stages of measurement invariance (except for RMSEA). Chi^2 tests indicate that strict measurement invariance is given.

4.2.5 Conscientiousness: current personality

Fit model:

Show the code
# Code snippets adapted from https://quantdev.ssri.psu.edu/sites/qdev/files/LongitudinalMeasurementInvariance_2017_1108.html

# Configural invariance model
configural_consc_curr <- '
# Define the latent factors
consc_curr1 =~ NA*consc_curr_par1_t1 + lambda1*consc_curr_par1_t1 + consc_curr_par2_t1 + consc_curr_par3_t1
consc_curr2 =~ NA*consc_curr_par1_t2 + lambda1*consc_curr_par1_t2 + consc_curr_par2_t2 + consc_curr_par3_t2

# Intercepts
consc_curr_par1_t1 ~ i1*1
consc_curr_par2_t1 ~ 1
consc_curr_par3_t1 ~ 1

consc_curr_par1_t2 ~ i1*1
consc_curr_par2_t2 ~ 1
consc_curr_par3_t2 ~ 1

# Unique Variances
consc_curr_par1_t1 ~~ consc_curr_par1_t1
consc_curr_par2_t1 ~~ consc_curr_par2_t1
consc_curr_par3_t1 ~~ consc_curr_par3_t1

consc_curr_par1_t2 ~~ consc_curr_par1_t2
consc_curr_par2_t2 ~~ consc_curr_par2_t2
consc_curr_par3_t2 ~~ consc_curr_par3_t2

# Latent Variable Means
consc_curr1 ~ 0*1
consc_curr2 ~ 1

# Latent Variable Variances and Covariance
consc_curr1 ~~ 1*consc_curr1
consc_curr2 ~~ consc_curr2
consc_curr1 ~~ consc_curr2
'
fit_configural_consc_curr <- cfa(configural_consc_curr, data = df_sbsa3_wide_pers, mimic = "mplus", missing="ML")
summary(fit_configural_consc_curr, fit.measures = TRUE)

# Weak invariance model
weak_consc_curr <- '
# Define the latent factors
consc_curr1 =~ NA*consc_curr_par1_t1 + lambda1*consc_curr_par1_t1 + lambda2*consc_curr_par2_t1 + lambda3*consc_curr_par3_t1
consc_curr2 =~ NA*consc_curr_par1_t2 + lambda1*consc_curr_par1_t2 + lambda2*consc_curr_par2_t2 + lambda3*consc_curr_par3_t2

# Intercepts
consc_curr_par1_t1 ~ i1*1
consc_curr_par2_t1 ~ 1
consc_curr_par3_t1 ~ 1

consc_curr_par1_t2 ~ i1*1
consc_curr_par2_t2 ~ 1
consc_curr_par3_t2 ~ 1

# Unique Variances
consc_curr_par1_t1 ~~ consc_curr_par1_t1
consc_curr_par2_t1 ~~ consc_curr_par2_t1
consc_curr_par3_t1 ~~ consc_curr_par3_t1

consc_curr_par1_t2 ~~ consc_curr_par1_t2
consc_curr_par2_t2 ~~ consc_curr_par2_t2
consc_curr_par3_t2 ~~ consc_curr_par3_t2

# Latent Variable Means
consc_curr1 ~ 0*1
consc_curr2 ~ 1

# Latent Variable Variances and Covariance
consc_curr1 ~~ 1*consc_curr1
consc_curr2 ~~ consc_curr2
consc_curr1 ~~ consc_curr2
'
fit_weak_consc_curr <- cfa(weak_consc_curr, data = df_sbsa3_wide_pers, mimic = "mplus", missing="ML")
summary(fit_weak_consc_curr, fit.measures = TRUE)

# Strong invariance model (additional constraints on manifest intercepts)
strong_consc_curr <- '
# Define the latent factors
consc_curr1 =~ NA*consc_curr_par1_t1 + lambda1*consc_curr_par1_t1 + lambda2*consc_curr_par2_t1 + lambda3*consc_curr_par3_t1
consc_curr2 =~ NA*consc_curr_par1_t2 + lambda1*consc_curr_par1_t2 + lambda2*consc_curr_par2_t2 + lambda3*consc_curr_par3_t2

# Intercepts
consc_curr_par1_t1 ~ i1*1
consc_curr_par2_t1 ~ i2*1
consc_curr_par3_t1 ~ i3*1

consc_curr_par1_t2 ~ i1*1
consc_curr_par2_t2 ~ i2*1
consc_curr_par3_t2 ~ i3*1

# Unique Variances
consc_curr_par1_t1 ~~ consc_curr_par1_t1
consc_curr_par2_t1 ~~ consc_curr_par2_t1
consc_curr_par3_t1 ~~ consc_curr_par3_t1

consc_curr_par1_t2 ~~ consc_curr_par1_t2
consc_curr_par2_t2 ~~ consc_curr_par2_t2
consc_curr_par3_t2 ~~ consc_curr_par3_t2

# Latent Variable Means
consc_curr1 ~ 0*1
consc_curr2 ~ 1

# Latent Variable Variances and Covariance
consc_curr1 ~~ 1*consc_curr1
consc_curr2 ~~ consc_curr2
consc_curr1 ~~ consc_curr2
'
fit_strong_consc_curr <- cfa(strong_consc_curr, data = df_sbsa3_wide_pers, mimic = "mplus", missing="ML")
summary(fit_strong_consc_curr, fit.measures = TRUE)

# Strict invariance model (additional constraints on manifest variances)
strict_consc_curr <- '
# Define the latent factors
consc_curr1 =~ NA*consc_curr_par1_t1 + lambda1*consc_curr_par1_t1 + lambda2*consc_curr_par2_t1 + lambda3*consc_curr_par3_t1
consc_curr2 =~ NA*consc_curr_par1_t2 + lambda1*consc_curr_par1_t2 + lambda2*consc_curr_par2_t2 + lambda3*consc_curr_par3_t2

# Intercepts
consc_curr_par1_t1 ~ i1*1
consc_curr_par2_t1 ~ i2*1
consc_curr_par3_t1 ~ i3*1

consc_curr_par1_t2 ~ i1*1
consc_curr_par2_t2 ~ i2*1
consc_curr_par3_t2 ~ i3*1

# Unique Variances
consc_curr_par1_t1 ~~ u1*consc_curr_par1_t1
consc_curr_par2_t1 ~~ u2*consc_curr_par2_t1
consc_curr_par3_t1 ~~ u3*consc_curr_par3_t1

consc_curr_par1_t2 ~~ u1*consc_curr_par1_t2
consc_curr_par2_t2 ~~ u2*consc_curr_par2_t2
consc_curr_par3_t2 ~~ u3*consc_curr_par3_t2

# Latent Variable Means
consc_curr1 ~ 0*1
consc_curr2 ~ 1

# Latent Variable Variances and Covariance
consc_curr1 ~~ 1*consc_curr1
consc_curr2 ~~ consc_curr2
consc_curr1 ~~ consc_curr2
'
fit_strict_consc_curr <- cfa(strict_consc_curr, data = df_sbsa3_wide_pers, mimic = "mplus", missing="ML")
summary(fit_strict_consc_curr, fit.measures = TRUE)

Results summary:

# compare model fit
bind_rows(broom::glance(fit_configural_consc_curr) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_weak_consc_curr) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_strong_consc_curr) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_strict_consc_curr) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr)) %>% 
  mutate(model = c("configural", "weak", "strong", "strict")) %>% 
  select(model, everything())
# A tibble: 4 × 10
  model       nobs  npar chisq   AIC   BIC   cfi   tli rmsea   srmr
  <chr>      <int> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>  <dbl>
1 configural   530    19  207. 5035. 5116. 0.921 0.851 0.217 0.0358
2 weak         530    17  207. 5031. 5103. 0.921 0.882 0.193 0.0359
3 strong       530    15  208. 5027. 5092. 0.922 0.902 0.175 0.0358
4 strict       530    12  211. 5024. 5076. 0.922 0.922 0.157 0.0401
# chi-square difference test for nested models 
anova(fit_configural_consc_curr, fit_weak_consc_curr)

Chi-Squared Difference Test

                          Df    AIC    BIC  Chisq Chisq diff RMSEA Df diff
fit_configural_consc_curr  8 5034.5 5115.7 206.85                         
fit_weak_consc_curr       10 5030.7 5103.4 207.05    0.20391     0       2
                          Pr(>Chisq)
fit_configural_consc_curr           
fit_weak_consc_curr           0.9031
anova(fit_weak_consc_curr, fit_strong_consc_curr)

Chi-Squared Difference Test

                      Df    AIC    BIC  Chisq Chisq diff RMSEA Df diff
fit_weak_consc_curr   10 5030.7 5103.4 207.05                         
fit_strong_consc_curr 12 5027.5 5091.6 207.80    0.74741     0       2
                      Pr(>Chisq)
fit_weak_consc_curr             
fit_strong_consc_curr     0.6882
anova(fit_strong_consc_curr, fit_strict_consc_curr)

Chi-Squared Difference Test

                      Df    AIC    BIC  Chisq Chisq diff RMSEA Df diff
fit_strong_consc_curr 12 5027.5 5091.6 207.80                         
fit_strict_consc_curr 15 5024.2 5075.5 210.55     2.7538     0       3
                      Pr(>Chisq)
fit_strong_consc_curr           
fit_strict_consc_curr     0.4312

Good model fit across all stages of measurement invariance (except for RMSEA). Chi^2 tests indicate that strict measurement invariance is given.

4.2.6 Conscientiousness: ideal personality

Fit model:

Show the code
# Code snippets adapted from https://quantdev.ssri.psu.edu/sites/qdev/files/LongitudinalMeasurementInvariance_2017_1108.html

# Configural invariance model
configural_consc_ideal <- '
# Define the latent factors
consc_ideal1 =~ NA*consc_ideal_par1_t1 + lambda1*consc_ideal_par1_t1 + consc_ideal_par2_t1 + consc_ideal_par3_t1
consc_ideal2 =~ NA*consc_ideal_par1_t2 + lambda1*consc_ideal_par1_t2 + consc_ideal_par2_t2 + consc_ideal_par3_t2

# Intercepts
consc_ideal_par1_t1 ~ i1*1
consc_ideal_par2_t1 ~ 1
consc_ideal_par3_t1 ~ 1

consc_ideal_par1_t2 ~ i1*1
consc_ideal_par2_t2 ~ 1
consc_ideal_par3_t2 ~ 1

# Unique Variances
consc_ideal_par1_t1 ~~ consc_ideal_par1_t1
consc_ideal_par2_t1 ~~ consc_ideal_par2_t1
consc_ideal_par3_t1 ~~ consc_ideal_par3_t1

consc_ideal_par1_t2 ~~ consc_ideal_par1_t2
consc_ideal_par2_t2 ~~ consc_ideal_par2_t2
consc_ideal_par3_t2 ~~ consc_ideal_par3_t2

# Latent Variable Means
consc_ideal1 ~ 0*1
consc_ideal2 ~ 1

# Latent Variable Variances and Covariance
consc_ideal1 ~~ 1*consc_ideal1
consc_ideal2 ~~ consc_ideal2
consc_ideal1 ~~ consc_ideal2
'
fit_configural_consc_ideal <- cfa(configural_consc_ideal, data = df_sbsa3_wide_pers %>% filter(group=="Group 1"), mimic = "mplus", missing="ML")
summary(fit_configural_consc_ideal, fit.measures = TRUE)

# Weak invariance model
weak_consc_ideal <- '
# Define the latent factors
consc_ideal1 =~ NA*consc_ideal_par1_t1 + lambda1*consc_ideal_par1_t1 + lambda2*consc_ideal_par2_t1 + lambda3*consc_ideal_par3_t1
consc_ideal2 =~ NA*consc_ideal_par1_t2 + lambda1*consc_ideal_par1_t2 + lambda2*consc_ideal_par2_t2 + lambda3*consc_ideal_par3_t2

# Intercepts
consc_ideal_par1_t1 ~ i1*1
consc_ideal_par2_t1 ~ 1
consc_ideal_par3_t1 ~ 1

consc_ideal_par1_t2 ~ i1*1
consc_ideal_par2_t2 ~ 1
consc_ideal_par3_t2 ~ 1

# Unique Variances
consc_ideal_par1_t1 ~~ consc_ideal_par1_t1
consc_ideal_par2_t1 ~~ consc_ideal_par2_t1
consc_ideal_par3_t1 ~~ consc_ideal_par3_t1

consc_ideal_par1_t2 ~~ consc_ideal_par1_t2
consc_ideal_par2_t2 ~~ consc_ideal_par2_t2
consc_ideal_par3_t2 ~~ consc_ideal_par3_t2

# Latent Variable Means
consc_ideal1 ~ 0*1
consc_ideal2 ~ 1

# Latent Variable Variances and Covariance
consc_ideal1 ~~ 1*consc_ideal1
consc_ideal2 ~~ consc_ideal2
consc_ideal1 ~~ consc_ideal2
'
fit_weak_consc_ideal <- cfa(weak_consc_ideal, data = df_sbsa3_wide_pers %>% filter(group=="Group 1"), mimic = "mplus", missing="ML")
summary(fit_weak_consc_ideal, fit.measures = TRUE)

# Strong invariance model (additional constraints on manifest intercepts)
strong_consc_ideal <- '
# Define the latent factors
consc_ideal1 =~ NA*consc_ideal_par1_t1 + lambda1*consc_ideal_par1_t1 + lambda2*consc_ideal_par2_t1 + lambda3*consc_ideal_par3_t1
consc_ideal2 =~ NA*consc_ideal_par1_t2 + lambda1*consc_ideal_par1_t2 + lambda2*consc_ideal_par2_t2 + lambda3*consc_ideal_par3_t2

# Intercepts
consc_ideal_par1_t1 ~ i1*1
consc_ideal_par2_t1 ~ i2*1
consc_ideal_par3_t1 ~ i3*1

consc_ideal_par1_t2 ~ i1*1
consc_ideal_par2_t2 ~ i2*1
consc_ideal_par3_t2 ~ i3*1

# Unique Variances
consc_ideal_par1_t1 ~~ consc_ideal_par1_t1
consc_ideal_par2_t1 ~~ consc_ideal_par2_t1
consc_ideal_par3_t1 ~~ consc_ideal_par3_t1

consc_ideal_par1_t2 ~~ consc_ideal_par1_t2
consc_ideal_par2_t2 ~~ consc_ideal_par2_t2
consc_ideal_par3_t2 ~~ consc_ideal_par3_t2

# Latent Variable Means
consc_ideal1 ~ 0*1
consc_ideal2 ~ 1

# Latent Variable Variances and Covariance
consc_ideal1 ~~ 1*consc_ideal1
consc_ideal2 ~~ consc_ideal2
consc_ideal1 ~~ consc_ideal2
'
fit_strong_consc_ideal <- cfa(strong_consc_ideal, data = df_sbsa3_wide_pers %>% filter(group=="Group 1"), mimic = "mplus", missing="ML")
summary(fit_strong_consc_ideal, fit.measures = TRUE)

# Strict invariance model (additional constraints on manifest variances)
strict_consc_ideal <- '
# Define the latent factors
consc_ideal1 =~ NA*consc_ideal_par1_t1 + lambda1*consc_ideal_par1_t1 + lambda2*consc_ideal_par2_t1 + lambda3*consc_ideal_par3_t1
consc_ideal2 =~ NA*consc_ideal_par1_t2 + lambda1*consc_ideal_par1_t2 + lambda2*consc_ideal_par2_t2 + lambda3*consc_ideal_par3_t2

# Intercepts
consc_ideal_par1_t1 ~ i1*1
consc_ideal_par2_t1 ~ i2*1
consc_ideal_par3_t1 ~ i3*1

consc_ideal_par1_t2 ~ i1*1
consc_ideal_par2_t2 ~ i2*1
consc_ideal_par3_t2 ~ i3*1

# Unique Variances
consc_ideal_par1_t1 ~~ u1*consc_ideal_par1_t1
consc_ideal_par2_t1 ~~ u2*consc_ideal_par2_t1
consc_ideal_par3_t1 ~~ u3*consc_ideal_par3_t1

consc_ideal_par1_t2 ~~ u1*consc_ideal_par1_t2
consc_ideal_par2_t2 ~~ u2*consc_ideal_par2_t2
consc_ideal_par3_t2 ~~ u3*consc_ideal_par3_t2

# Latent Variable Means
consc_ideal1 ~ 0*1
consc_ideal2 ~ 1

# Latent Variable Variances and Covariance
consc_ideal1 ~~ 1*consc_ideal1
consc_ideal2 ~~ consc_ideal2
consc_ideal1 ~~ consc_ideal2
'
fit_strict_consc_ideal <- cfa(strict_consc_ideal, data = df_sbsa3_wide_pers %>% filter(group=="Group 1"), mimic = "mplus", missing="ML")
summary(fit_strict_consc_ideal, fit.measures = TRUE)

Results summary:

# compare model fit
bind_rows(broom::glance(fit_configural_consc_ideal) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_weak_consc_ideal) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_strong_consc_ideal) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_strict_consc_ideal) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr)) %>% 
  mutate(model = c("configural", "weak", "strong", "strict")) %>% 
  select(model, everything())
# A tibble: 4 × 10
  model       nobs  npar chisq   AIC   BIC   cfi   tli  rmsea   srmr
  <chr>      <int> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>  <dbl>  <dbl>
1 configural   175    19  27.7 1361. 1422. 0.930 0.870 0.119  0.0437
2 weak         175    17  28.7 1358. 1412. 0.934 0.901 0.103  0.0647
3 strong       175    15  29.0 1355. 1402. 0.940 0.925 0.0899 0.0685
4 strict       175    12  31.1 1351. 1389. 0.943 0.943 0.0782 0.0858
# chi-square difference test for nested models 
anova(fit_configural_consc_ideal, fit_weak_consc_ideal)

Chi-Squared Difference Test

                           Df    AIC    BIC  Chisq Chisq diff RMSEA Df diff
fit_configural_consc_ideal  8 1361.4 1421.5 27.747                         
fit_weak_consc_ideal       10 1358.4 1412.2 28.737    0.99026     0       2
                           Pr(>Chisq)
fit_configural_consc_ideal           
fit_weak_consc_ideal           0.6095
anova(fit_weak_consc_ideal, fit_strong_consc_ideal)

Chi-Squared Difference Test

                       Df    AIC    BIC  Chisq Chisq diff RMSEA Df diff
fit_weak_consc_ideal   10 1358.4 1412.2 28.737                         
fit_strong_consc_ideal 12 1354.6 1402.1 28.981    0.24429     0       2
                       Pr(>Chisq)
fit_weak_consc_ideal             
fit_strong_consc_ideal      0.885
anova(fit_strong_consc_ideal, fit_strict_consc_ideal)

Chi-Squared Difference Test

                       Df    AIC    BIC  Chisq Chisq diff RMSEA Df diff
fit_strong_consc_ideal 12 1354.6 1402.1 28.981                         
fit_strict_consc_ideal 15 1350.7 1388.7 31.062     2.0812     0       3
                       Pr(>Chisq)
fit_strong_consc_ideal           
fit_strict_consc_ideal     0.5557

Good model fit across all stages of measurement invariance. Chi^2 tests indicate that strict measurement invariance is given.

4.2.7 Neuroticism: current personality

Fit model:

Show the code
# Code snippets adapted from https://quantdev.ssri.psu.edu/sites/qdev/files/LongitudinalMeasurementInvariance_2017_1108.html

# Configural invariance model
configural_neuro_curr <- '
# Define the latent factors
neuro_curr1 =~ NA*neuro_curr_par1_t1 + lambda1*neuro_curr_par1_t1 + neuro_curr_par2_t1 + neuro_curr_par3_t1
neuro_curr2 =~ NA*neuro_curr_par1_t2 + lambda1*neuro_curr_par1_t2 + neuro_curr_par2_t2 + neuro_curr_par3_t2

# Intercepts
neuro_curr_par1_t1 ~ i1*1
neuro_curr_par2_t1 ~ 1
neuro_curr_par3_t1 ~ 1

neuro_curr_par1_t2 ~ i1*1
neuro_curr_par2_t2 ~ 1
neuro_curr_par3_t2 ~ 1

# Unique Variances
neuro_curr_par1_t1 ~~ neuro_curr_par1_t1
neuro_curr_par2_t1 ~~ neuro_curr_par2_t1
neuro_curr_par3_t1 ~~ neuro_curr_par3_t1

neuro_curr_par1_t2 ~~ neuro_curr_par1_t2
neuro_curr_par2_t2 ~~ neuro_curr_par2_t2
neuro_curr_par3_t2 ~~ neuro_curr_par3_t2

# Latent Variable Means
neuro_curr1 ~ 0*1
neuro_curr2 ~ 1

# Latent Variable Variances and Covariance
neuro_curr1 ~~ 1*neuro_curr1
neuro_curr2 ~~ neuro_curr2
neuro_curr1 ~~ neuro_curr2
'
fit_configural_neuro_curr <- cfa(configural_neuro_curr, data = df_sbsa3_wide_pers, mimic = "mplus", missing="ML")
summary(fit_configural_neuro_curr, fit.measures = TRUE)

# Weak invariance model
weak_neuro_curr <- '
# Define the latent factors
neuro_curr1 =~ NA*neuro_curr_par1_t1 + lambda1*neuro_curr_par1_t1 + lambda2*neuro_curr_par2_t1 + lambda3*neuro_curr_par3_t1
neuro_curr2 =~ NA*neuro_curr_par1_t2 + lambda1*neuro_curr_par1_t2 + lambda2*neuro_curr_par2_t2 + lambda3*neuro_curr_par3_t2

# Intercepts
neuro_curr_par1_t1 ~ i1*1
neuro_curr_par2_t1 ~ 1
neuro_curr_par3_t1 ~ 1

neuro_curr_par1_t2 ~ i1*1
neuro_curr_par2_t2 ~ 1
neuro_curr_par3_t2 ~ 1

# Unique Variances
neuro_curr_par1_t1 ~~ neuro_curr_par1_t1
neuro_curr_par2_t1 ~~ neuro_curr_par2_t1
neuro_curr_par3_t1 ~~ neuro_curr_par3_t1

neuro_curr_par1_t2 ~~ neuro_curr_par1_t2
neuro_curr_par2_t2 ~~ neuro_curr_par2_t2
neuro_curr_par3_t2 ~~ neuro_curr_par3_t2

# Latent Variable Means
neuro_curr1 ~ 0*1
neuro_curr2 ~ 1

# Latent Variable Variances and Covariance
neuro_curr1 ~~ 1*neuro_curr1
neuro_curr2 ~~ neuro_curr2
neuro_curr1 ~~ neuro_curr2
'
fit_weak_neuro_curr <- cfa(weak_neuro_curr, data = df_sbsa3_wide_pers, mimic = "mplus", missing="ML")
summary(fit_weak_neuro_curr, fit.measures = TRUE)

# Strong invariance model (additional constraints on manifest intercepts)
strong_neuro_curr <- '
# Define the latent factors
neuro_curr1 =~ NA*neuro_curr_par1_t1 + lambda1*neuro_curr_par1_t1 + lambda2*neuro_curr_par2_t1 + lambda3*neuro_curr_par3_t1
neuro_curr2 =~ NA*neuro_curr_par1_t2 + lambda1*neuro_curr_par1_t2 + lambda2*neuro_curr_par2_t2 + lambda3*neuro_curr_par3_t2

# Intercepts
neuro_curr_par1_t1 ~ i1*1
neuro_curr_par2_t1 ~ i2*1
neuro_curr_par3_t1 ~ i3*1

neuro_curr_par1_t2 ~ i1*1
neuro_curr_par2_t2 ~ i2*1
neuro_curr_par3_t2 ~ i3*1

# Unique Variances
neuro_curr_par1_t1 ~~ neuro_curr_par1_t1
neuro_curr_par2_t1 ~~ neuro_curr_par2_t1
neuro_curr_par3_t1 ~~ neuro_curr_par3_t1

neuro_curr_par1_t2 ~~ neuro_curr_par1_t2
neuro_curr_par2_t2 ~~ neuro_curr_par2_t2
neuro_curr_par3_t2 ~~ neuro_curr_par3_t2

# Latent Variable Means
neuro_curr1 ~ 0*1
neuro_curr2 ~ 1

# Latent Variable Variances and Covariance
neuro_curr1 ~~ 1*neuro_curr1
neuro_curr2 ~~ neuro_curr2
neuro_curr1 ~~ neuro_curr2
'
fit_strong_neuro_curr <- cfa(strong_neuro_curr, data = df_sbsa3_wide_pers, mimic = "mplus", missing="ML")
summary(fit_strong_neuro_curr, fit.measures = TRUE)

# Strict invariance model (additional constraints on manifest variances)
strict_neuro_curr <- '
# Define the latent factors
neuro_curr1 =~ NA*neuro_curr_par1_t1 + lambda1*neuro_curr_par1_t1 + lambda2*neuro_curr_par2_t1 + lambda3*neuro_curr_par3_t1
neuro_curr2 =~ NA*neuro_curr_par1_t2 + lambda1*neuro_curr_par1_t2 + lambda2*neuro_curr_par2_t2 + lambda3*neuro_curr_par3_t2

# Intercepts
neuro_curr_par1_t1 ~ i1*1
neuro_curr_par2_t1 ~ i2*1
neuro_curr_par3_t1 ~ i3*1

neuro_curr_par1_t2 ~ i1*1
neuro_curr_par2_t2 ~ i2*1
neuro_curr_par3_t2 ~ i3*1

# Unique Variances
neuro_curr_par1_t1 ~~ u1*neuro_curr_par1_t1
neuro_curr_par2_t1 ~~ u2*neuro_curr_par2_t1
neuro_curr_par3_t1 ~~ u3*neuro_curr_par3_t1

neuro_curr_par1_t2 ~~ u1*neuro_curr_par1_t2
neuro_curr_par2_t2 ~~ u2*neuro_curr_par2_t2
neuro_curr_par3_t2 ~~ u3*neuro_curr_par3_t2

# Latent Variable Means
neuro_curr1 ~ 0*1
neuro_curr2 ~ 1

# Latent Variable Variances and Covariance
neuro_curr1 ~~ 1*neuro_curr1
neuro_curr2 ~~ neuro_curr2
neuro_curr1 ~~ neuro_curr2
'
fit_strict_neuro_curr <- cfa(strict_neuro_curr, data = df_sbsa3_wide_pers, mimic = "mplus", missing="ML")
summary(fit_strict_neuro_curr, fit.measures = TRUE)

Results summary:

# compare model fit
bind_rows(broom::glance(fit_configural_neuro_curr) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_weak_neuro_curr) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_strong_neuro_curr) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_strict_neuro_curr) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr)) %>% 
  mutate(model = c("configural", "weak", "strong", "strict")) %>% 
  select(model, everything())
# A tibble: 4 × 10
  model       nobs  npar chisq   AIC   BIC   cfi   tli rmsea   srmr
  <chr>      <int> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>  <dbl>
1 configural   530    19  141. 5055. 5136. 0.959 0.923 0.177 0.0213
2 weak         530    17  142. 5052. 5124. 0.959 0.939 0.158 0.0238
3 strong       530    15  149. 5055. 5119. 0.958 0.947 0.147 0.0231
4 strict       530    12  151. 5051. 5102. 0.958 0.958 0.131 0.0242
# chi-square difference test for nested models 
anova(fit_configural_neuro_curr, fit_weak_neuro_curr)

Chi-Squared Difference Test

                          Df    AIC    BIC  Chisq Chisq diff RMSEA Df diff
fit_configural_neuro_curr  8 5054.9 5136.1 141.36                         
fit_weak_neuro_curr       10 5051.5 5124.2 141.97    0.60831     0       2
                          Pr(>Chisq)
fit_configural_neuro_curr           
fit_weak_neuro_curr           0.7377
anova(fit_weak_neuro_curr, fit_strong_neuro_curr)

Chi-Squared Difference Test

                      Df    AIC    BIC  Chisq Chisq diff    RMSEA Df diff
fit_weak_neuro_curr   10 5051.5 5124.2 141.97                            
fit_strong_neuro_curr 12 5054.9 5119.0 149.40     7.4313 0.071581       2
                      Pr(>Chisq)  
fit_weak_neuro_curr               
fit_strong_neuro_curr    0.02434 *
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
anova(fit_strong_neuro_curr, fit_strict_neuro_curr)

Chi-Squared Difference Test

                      Df    AIC    BIC  Chisq Chisq diff RMSEA Df diff
fit_strong_neuro_curr 12 5054.9 5119.0 149.40                         
fit_strict_neuro_curr 15 5051.0 5102.3 151.44     2.0399     0       3
                      Pr(>Chisq)
fit_strong_neuro_curr           
fit_strict_neuro_curr     0.5642

Good model fit across all stages of measurement invariance (except for RMSEA). Chi^2 tests indicate that weak measurement invariance is given (at p < .05; strict at p < .01).

4.2.8 Neuroticism: ideal personality

Fit model:

Show the code
# Code snippets adapted from https://quantdev.ssri.psu.edu/sites/qdev/files/LongitudinalMeasurementInvariance_2017_1108.html

# Configural invariance model
configural_neuro_ideal <- '
# Define the latent factors
neuro_ideal1 =~ NA*neuro_ideal_par1_t1 + lambda1*neuro_ideal_par1_t1 + neuro_ideal_par2_t1 + neuro_ideal_par3_t1
neuro_ideal2 =~ NA*neuro_ideal_par1_t2 + lambda1*neuro_ideal_par1_t2 + neuro_ideal_par2_t2 + neuro_ideal_par3_t2

# Intercepts
neuro_ideal_par1_t1 ~ i1*1
neuro_ideal_par2_t1 ~ 1
neuro_ideal_par3_t1 ~ 1

neuro_ideal_par1_t2 ~ i1*1
neuro_ideal_par2_t2 ~ 1
neuro_ideal_par3_t2 ~ 1

# Unique Variances
neuro_ideal_par1_t1 ~~ neuro_ideal_par1_t1
neuro_ideal_par2_t1 ~~ neuro_ideal_par2_t1
neuro_ideal_par3_t1 ~~ neuro_ideal_par3_t1

neuro_ideal_par1_t2 ~~ neuro_ideal_par1_t2
neuro_ideal_par2_t2 ~~ neuro_ideal_par2_t2
neuro_ideal_par3_t2 ~~ neuro_ideal_par3_t2

# Latent Variable Means
neuro_ideal1 ~ 0*1
neuro_ideal2 ~ 1

# Latent Variable Variances and Covariance
neuro_ideal1 ~~ 1*neuro_ideal1
neuro_ideal2 ~~ neuro_ideal2
neuro_ideal1 ~~ neuro_ideal2
'
fit_configural_neuro_ideal <- cfa(configural_neuro_ideal, data = df_sbsa3_wide_pers %>% filter(group=="Group 1"), mimic = "mplus", missing="ML")
summary(fit_configural_neuro_ideal, fit.measures = TRUE)

# Weak invariance model
weak_neuro_ideal <- '
# Define the latent factors
neuro_ideal1 =~ NA*neuro_ideal_par1_t1 + lambda1*neuro_ideal_par1_t1 + lambda2*neuro_ideal_par2_t1 + lambda3*neuro_ideal_par3_t1
neuro_ideal2 =~ NA*neuro_ideal_par1_t2 + lambda1*neuro_ideal_par1_t2 + lambda2*neuro_ideal_par2_t2 + lambda3*neuro_ideal_par3_t2

# Intercepts
neuro_ideal_par1_t1 ~ i1*1
neuro_ideal_par2_t1 ~ 1
neuro_ideal_par3_t1 ~ 1

neuro_ideal_par1_t2 ~ i1*1
neuro_ideal_par2_t2 ~ 1
neuro_ideal_par3_t2 ~ 1

# Unique Variances
neuro_ideal_par1_t1 ~~ neuro_ideal_par1_t1
neuro_ideal_par2_t1 ~~ neuro_ideal_par2_t1
neuro_ideal_par3_t1 ~~ neuro_ideal_par3_t1

neuro_ideal_par1_t2 ~~ neuro_ideal_par1_t2
neuro_ideal_par2_t2 ~~ neuro_ideal_par2_t2
neuro_ideal_par3_t2 ~~ neuro_ideal_par3_t2

# Latent Variable Means
neuro_ideal1 ~ 0*1
neuro_ideal2 ~ 1

# Latent Variable Variances and Covariance
neuro_ideal1 ~~ 1*neuro_ideal1
neuro_ideal2 ~~ neuro_ideal2
neuro_ideal1 ~~ neuro_ideal2
'
fit_weak_neuro_ideal <- cfa(weak_neuro_ideal, data = df_sbsa3_wide_pers %>% filter(group=="Group 1"), mimic = "mplus", missing="ML")
summary(fit_weak_neuro_ideal, fit.measures = TRUE)

# Strong invariance model (additional constraints on manifest intercepts)
strong_neuro_ideal <- '
# Define the latent factors
neuro_ideal1 =~ NA*neuro_ideal_par1_t1 + lambda1*neuro_ideal_par1_t1 + lambda2*neuro_ideal_par2_t1 + lambda3*neuro_ideal_par3_t1
neuro_ideal2 =~ NA*neuro_ideal_par1_t2 + lambda1*neuro_ideal_par1_t2 + lambda2*neuro_ideal_par2_t2 + lambda3*neuro_ideal_par3_t2

# Intercepts
neuro_ideal_par1_t1 ~ i1*1
neuro_ideal_par2_t1 ~ i2*1
neuro_ideal_par3_t1 ~ i3*1

neuro_ideal_par1_t2 ~ i1*1
neuro_ideal_par2_t2 ~ i2*1
neuro_ideal_par3_t2 ~ i3*1

# Unique Variances
neuro_ideal_par1_t1 ~~ neuro_ideal_par1_t1
neuro_ideal_par2_t1 ~~ neuro_ideal_par2_t1
neuro_ideal_par3_t1 ~~ neuro_ideal_par3_t1

neuro_ideal_par1_t2 ~~ neuro_ideal_par1_t2
neuro_ideal_par2_t2 ~~ neuro_ideal_par2_t2
neuro_ideal_par3_t2 ~~ neuro_ideal_par3_t2

# Latent Variable Means
neuro_ideal1 ~ 0*1
neuro_ideal2 ~ 1

# Latent Variable Variances and Covariance
neuro_ideal1 ~~ 1*neuro_ideal1
neuro_ideal2 ~~ neuro_ideal2
neuro_ideal1 ~~ neuro_ideal2
'
fit_strong_neuro_ideal <- cfa(strong_neuro_ideal, data = df_sbsa3_wide_pers %>% filter(group=="Group 1"), mimic = "mplus", missing="ML")
summary(fit_strong_neuro_ideal, fit.measures = TRUE)

# Strict invariance model (additional constraints on manifest variances)
strict_neuro_ideal <- '
# Define the latent factors
neuro_ideal1 =~ NA*neuro_ideal_par1_t1 + lambda1*neuro_ideal_par1_t1 + lambda2*neuro_ideal_par2_t1 + lambda3*neuro_ideal_par3_t1
neuro_ideal2 =~ NA*neuro_ideal_par1_t2 + lambda1*neuro_ideal_par1_t2 + lambda2*neuro_ideal_par2_t2 + lambda3*neuro_ideal_par3_t2

# Intercepts
neuro_ideal_par1_t1 ~ i1*1
neuro_ideal_par2_t1 ~ i2*1
neuro_ideal_par3_t1 ~ i3*1

neuro_ideal_par1_t2 ~ i1*1
neuro_ideal_par2_t2 ~ i2*1
neuro_ideal_par3_t2 ~ i3*1

# Unique Variances
neuro_ideal_par1_t1 ~~ u1*neuro_ideal_par1_t1
neuro_ideal_par2_t1 ~~ u2*neuro_ideal_par2_t1
neuro_ideal_par3_t1 ~~ u3*neuro_ideal_par3_t1

neuro_ideal_par1_t2 ~~ u1*neuro_ideal_par1_t2
neuro_ideal_par2_t2 ~~ u2*neuro_ideal_par2_t2
neuro_ideal_par3_t2 ~~ u3*neuro_ideal_par3_t2

# Latent Variable Means
neuro_ideal1 ~ 0*1
neuro_ideal2 ~ 1

# Latent Variable Variances and Covariance
neuro_ideal1 ~~ 1*neuro_ideal1
neuro_ideal2 ~~ neuro_ideal2
neuro_ideal1 ~~ neuro_ideal2
'
fit_strict_neuro_ideal <- cfa(strict_neuro_ideal, data = df_sbsa3_wide_pers %>% filter(group=="Group 1"), mimic = "mplus", missing="ML")
summary(fit_strict_neuro_ideal, fit.measures = TRUE)

Results summary:

# compare model fit
bind_rows(broom::glance(fit_configural_neuro_ideal) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_weak_neuro_ideal) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_strong_neuro_ideal) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_strict_neuro_ideal) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr)) %>% 
  mutate(model = c("configural", "weak", "strong", "strict")) %>% 
  select(model, everything())
# A tibble: 4 × 10
  model       nobs  npar chisq   AIC   BIC   cfi   tli  rmsea   srmr
  <chr>      <int> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>  <dbl>  <dbl>
1 configural   175    19  10.6 1373. 1433. 0.990 0.982 0.0427 0.0314
2 weak         175    17  10.6 1369. 1423. 0.998 0.997 0.0180 0.0312
3 strong       175    15  19.5 1374. 1421. 0.972 0.965 0.0598 0.0410
4 strict       175    12  26.8 1375. 1413. 0.955 0.955 0.0671 0.0770
# chi-square difference test for nested models 
anova(fit_configural_neuro_ideal, fit_weak_neuro_ideal)

Chi-Squared Difference Test

                           Df    AIC    BIC  Chisq Chisq diff RMSEA Df diff
fit_configural_neuro_ideal  8 1372.8 1432.9 10.552                         
fit_weak_neuro_ideal       10 1368.8 1422.6 10.570   0.018348     0       2
                           Pr(>Chisq)
fit_configural_neuro_ideal           
fit_weak_neuro_ideal           0.9909
anova(fit_weak_neuro_ideal, fit_strong_neuro_ideal)

Chi-Squared Difference Test

                       Df    AIC    BIC  Chisq Chisq diff   RMSEA Df diff
fit_weak_neuro_ideal   10 1368.8 1422.6 10.570                           
fit_strong_neuro_ideal 12 1373.8 1421.2 19.515     8.9452 0.14087       2
                       Pr(>Chisq)  
fit_weak_neuro_ideal               
fit_strong_neuro_ideal    0.01142 *
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
anova(fit_strong_neuro_ideal, fit_strict_neuro_ideal)

Chi-Squared Difference Test

                       Df    AIC    BIC  Chisq Chisq diff    RMSEA Df diff
fit_strong_neuro_ideal 12 1373.8 1421.2 19.515                            
fit_strict_neuro_ideal 15 1375.1 1413.0 26.815     7.2996 0.090497       3
                       Pr(>Chisq)  
fit_strong_neuro_ideal             
fit_strict_neuro_ideal    0.06294 .
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Good model fit across all stages of measurement invariance. Chi^2 tests indicate that weak measurement invariance is given (at p < .05; strict at p < .01).

4.2.9 Openness: current personality

Fit model:

Show the code
# Code snippets adapted from https://quantdev.ssri.psu.edu/sites/qdev/files/LongitudinalMeasurementInvariance_2017_1108.html

# Configural invariance model
configural_openn_curr <- '
# Define the latent factors
openn_curr1 =~ NA*openn_curr_par1_t1 + lambda1*openn_curr_par1_t1 + openn_curr_par2_t1 + openn_curr_par3_t1
openn_curr2 =~ NA*openn_curr_par1_t2 + lambda1*openn_curr_par1_t2 + openn_curr_par2_t2 + openn_curr_par3_t2

# Intercepts
openn_curr_par1_t1 ~ i1*1
openn_curr_par2_t1 ~ 1
openn_curr_par3_t1 ~ 1

openn_curr_par1_t2 ~ i1*1
openn_curr_par2_t2 ~ 1
openn_curr_par3_t2 ~ 1

# Unique Variances
openn_curr_par1_t1 ~~ openn_curr_par1_t1
openn_curr_par2_t1 ~~ openn_curr_par2_t1
openn_curr_par3_t1 ~~ openn_curr_par3_t1

openn_curr_par1_t2 ~~ openn_curr_par1_t2
openn_curr_par2_t2 ~~ openn_curr_par2_t2
openn_curr_par3_t2 ~~ openn_curr_par3_t2

# Latent Variable Means
openn_curr1 ~ 0*1
openn_curr2 ~ 1

# Latent Variable Variances and Covariance
openn_curr1 ~~ 1*openn_curr1
openn_curr2 ~~ openn_curr2
openn_curr1 ~~ openn_curr2
'
fit_configural_openn_curr <- cfa(configural_openn_curr, data = df_sbsa3_wide_pers, mimic = "mplus", missing="ML")
summary(fit_configural_openn_curr, fit.measures = TRUE)

# Weak invariance model
weak_openn_curr <- '
# Define the latent factors
openn_curr1 =~ NA*openn_curr_par1_t1 + lambda1*openn_curr_par1_t1 + lambda2*openn_curr_par2_t1 + lambda3*openn_curr_par3_t1
openn_curr2 =~ NA*openn_curr_par1_t2 + lambda1*openn_curr_par1_t2 + lambda2*openn_curr_par2_t2 + lambda3*openn_curr_par3_t2

# Intercepts
openn_curr_par1_t1 ~ i1*1
openn_curr_par2_t1 ~ 1
openn_curr_par3_t1 ~ 1

openn_curr_par1_t2 ~ i1*1
openn_curr_par2_t2 ~ 1
openn_curr_par3_t2 ~ 1

# Unique Variances
openn_curr_par1_t1 ~~ openn_curr_par1_t1
openn_curr_par2_t1 ~~ openn_curr_par2_t1
openn_curr_par3_t1 ~~ openn_curr_par3_t1

openn_curr_par1_t2 ~~ openn_curr_par1_t2
openn_curr_par2_t2 ~~ openn_curr_par2_t2
openn_curr_par3_t2 ~~ openn_curr_par3_t2

# Latent Variable Means
openn_curr1 ~ 0*1
openn_curr2 ~ 1

# Latent Variable Variances and Covariance
openn_curr1 ~~ 1*openn_curr1
openn_curr2 ~~ openn_curr2
openn_curr1 ~~ openn_curr2
'
fit_weak_openn_curr <- cfa(weak_openn_curr, data = df_sbsa3_wide_pers, mimic = "mplus", missing="ML")
summary(fit_weak_openn_curr, fit.measures = TRUE)

# Strong invariance model (additional constraints on manifest intercepts)
strong_openn_curr <- '
# Define the latent factors
openn_curr1 =~ NA*openn_curr_par1_t1 + lambda1*openn_curr_par1_t1 + lambda2*openn_curr_par2_t1 + lambda3*openn_curr_par3_t1
openn_curr2 =~ NA*openn_curr_par1_t2 + lambda1*openn_curr_par1_t2 + lambda2*openn_curr_par2_t2 + lambda3*openn_curr_par3_t2

# Intercepts
openn_curr_par1_t1 ~ i1*1
openn_curr_par2_t1 ~ i2*1
openn_curr_par3_t1 ~ i3*1

openn_curr_par1_t2 ~ i1*1
openn_curr_par2_t2 ~ i2*1
openn_curr_par3_t2 ~ i3*1

# Unique Variances
openn_curr_par1_t1 ~~ openn_curr_par1_t1
openn_curr_par2_t1 ~~ openn_curr_par2_t1
openn_curr_par3_t1 ~~ openn_curr_par3_t1

openn_curr_par1_t2 ~~ openn_curr_par1_t2
openn_curr_par2_t2 ~~ openn_curr_par2_t2
openn_curr_par3_t2 ~~ openn_curr_par3_t2

# Latent Variable Means
openn_curr1 ~ 0*1
openn_curr2 ~ 1

# Latent Variable Variances and Covariance
openn_curr1 ~~ 1*openn_curr1
openn_curr2 ~~ openn_curr2
openn_curr1 ~~ openn_curr2
'
fit_strong_openn_curr <- cfa(strong_openn_curr, data = df_sbsa3_wide_pers, mimic = "mplus", missing="ML")
summary(fit_strong_openn_curr, fit.measures = TRUE)

# Strict invariance model (additional constraints on manifest variances)
strict_openn_curr <- '
# Define the latent factors
openn_curr1 =~ NA*openn_curr_par1_t1 + lambda1*openn_curr_par1_t1 + lambda2*openn_curr_par2_t1 + lambda3*openn_curr_par3_t1
openn_curr2 =~ NA*openn_curr_par1_t2 + lambda1*openn_curr_par1_t2 + lambda2*openn_curr_par2_t2 + lambda3*openn_curr_par3_t2

# Intercepts
openn_curr_par1_t1 ~ i1*1
openn_curr_par2_t1 ~ i2*1
openn_curr_par3_t1 ~ i3*1

openn_curr_par1_t2 ~ i1*1
openn_curr_par2_t2 ~ i2*1
openn_curr_par3_t2 ~ i3*1

# Unique Variances
openn_curr_par1_t1 ~~ u1*openn_curr_par1_t1
openn_curr_par2_t1 ~~ u2*openn_curr_par2_t1
openn_curr_par3_t1 ~~ u3*openn_curr_par3_t1

openn_curr_par1_t2 ~~ u1*openn_curr_par1_t2
openn_curr_par2_t2 ~~ u2*openn_curr_par2_t2
openn_curr_par3_t2 ~~ u3*openn_curr_par3_t2

# Latent Variable Means
openn_curr1 ~ 0*1
openn_curr2 ~ 1

# Latent Variable Variances and Covariance
openn_curr1 ~~ 1*openn_curr1
openn_curr2 ~~ openn_curr2
openn_curr1 ~~ openn_curr2
'
fit_strict_openn_curr <- cfa(strict_openn_curr, data = df_sbsa3_wide_pers, mimic = "mplus", missing="ML")
summary(fit_strict_openn_curr, fit.measures = TRUE)

Results summary:

# compare model fit
bind_rows(broom::glance(fit_configural_openn_curr) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_weak_openn_curr) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_strong_openn_curr) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_strict_openn_curr) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr)) %>% 
  mutate(model = c("configural", "weak", "strong", "strict")) %>% 
  select(model, everything())
# A tibble: 4 × 10
  model       nobs  npar chisq   AIC   BIC   cfi   tli rmsea   srmr
  <chr>      <int> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>  <dbl>
1 configural   530    19  365. 5357. 5438. 0.810 0.644 0.290 0.0789
2 weak         530    17  365. 5353. 5426. 0.811 0.716 0.259 0.0789
3 strong       530    15  368. 5352. 5416. 0.811 0.763 0.237 0.0799
4 strict       530    12  368. 5346. 5398. 0.812 0.812 0.211 0.0788
# chi-square difference test for nested models 
anova(fit_configural_openn_curr, fit_weak_openn_curr)

Chi-Squared Difference Test

                          Df    AIC    BIC  Chisq Chisq diff RMSEA Df diff
fit_configural_openn_curr  8 5356.7 5437.9 364.87                         
fit_weak_openn_curr       10 5352.9 5425.6 365.06    0.19134     0       2
                          Pr(>Chisq)
fit_configural_openn_curr           
fit_weak_openn_curr           0.9088
anova(fit_weak_openn_curr, fit_strong_openn_curr)

Chi-Squared Difference Test

                      Df    AIC    BIC  Chisq Chisq diff    RMSEA Df diff
fit_weak_openn_curr   10 5352.9 5425.6 365.06                            
fit_strong_openn_curr 12 5351.6 5415.7 367.73     2.6715 0.025169       2
                      Pr(>Chisq)
fit_weak_openn_curr             
fit_strong_openn_curr      0.263
anova(fit_strong_openn_curr, fit_strict_openn_curr)

Chi-Squared Difference Test

                      Df    AIC    BIC  Chisq Chisq diff RMSEA Df diff
fit_strong_openn_curr 12 5351.6 5415.7 367.73                         
fit_strict_openn_curr 15 5346.3 5397.6 368.41    0.68078     0       3
                      Pr(>Chisq)
fit_strong_openn_curr           
fit_strict_openn_curr     0.8777

Model fit not satisfactory across all stages of measurement invariance. Chi^2 tests indicate that strict measurement invariance is given.

4.2.10 Openness: ideal personality

Fit model:

Show the code
# Code snippets adapted from https://quantdev.ssri.psu.edu/sites/qdev/files/LongitudinalMeasurementInvariance_2017_1108.html

# Configural invariance model
configural_openn_ideal <- '
# Define the latent factors
openn_ideal1 =~ NA*openn_ideal_par1_t1 + lambda1*openn_ideal_par1_t1 + openn_ideal_par2_t1 + openn_ideal_par3_t1
openn_ideal2 =~ NA*openn_ideal_par1_t2 + lambda1*openn_ideal_par1_t2 + openn_ideal_par2_t2 + openn_ideal_par3_t2

# Intercepts
openn_ideal_par1_t1 ~ i1*1
openn_ideal_par2_t1 ~ 1
openn_ideal_par3_t1 ~ 1

openn_ideal_par1_t2 ~ i1*1
openn_ideal_par2_t2 ~ 1
openn_ideal_par3_t2 ~ 1

# Unique Variances
openn_ideal_par1_t1 ~~ openn_ideal_par1_t1
openn_ideal_par2_t1 ~~ openn_ideal_par2_t1
openn_ideal_par3_t1 ~~ openn_ideal_par3_t1

openn_ideal_par1_t2 ~~ openn_ideal_par1_t2
openn_ideal_par2_t2 ~~ openn_ideal_par2_t2
openn_ideal_par3_t2 ~~ openn_ideal_par3_t2

# Latent Variable Means
openn_ideal1 ~ 0*1
openn_ideal2 ~ 1

# Latent Variable Variances and Covariance
openn_ideal1 ~~ 1*openn_ideal1
openn_ideal2 ~~ openn_ideal2
openn_ideal1 ~~ openn_ideal2
'
fit_configural_openn_ideal <- cfa(configural_openn_ideal, data = df_sbsa3_wide_pers %>% filter(group=="Group 1"), mimic = "mplus", missing="ML")
summary(fit_configural_openn_ideal, fit.measures = TRUE)

# Weak invariance model
weak_openn_ideal <- '
# Define the latent factors
openn_ideal1 =~ NA*openn_ideal_par1_t1 + lambda1*openn_ideal_par1_t1 + lambda2*openn_ideal_par2_t1 + lambda3*openn_ideal_par3_t1
openn_ideal2 =~ NA*openn_ideal_par1_t2 + lambda1*openn_ideal_par1_t2 + lambda2*openn_ideal_par2_t2 + lambda3*openn_ideal_par3_t2

# Intercepts
openn_ideal_par1_t1 ~ i1*1
openn_ideal_par2_t1 ~ 1
openn_ideal_par3_t1 ~ 1

openn_ideal_par1_t2 ~ i1*1
openn_ideal_par2_t2 ~ 1
openn_ideal_par3_t2 ~ 1

# Unique Variances
openn_ideal_par1_t1 ~~ openn_ideal_par1_t1
openn_ideal_par2_t1 ~~ openn_ideal_par2_t1
openn_ideal_par3_t1 ~~ openn_ideal_par3_t1

openn_ideal_par1_t2 ~~ openn_ideal_par1_t2
openn_ideal_par2_t2 ~~ openn_ideal_par2_t2
openn_ideal_par3_t2 ~~ openn_ideal_par3_t2

# Latent Variable Means
openn_ideal1 ~ 0*1
openn_ideal2 ~ 1

# Latent Variable Variances and Covariance
openn_ideal1 ~~ 1*openn_ideal1
openn_ideal2 ~~ openn_ideal2
openn_ideal1 ~~ openn_ideal2
'
fit_weak_openn_ideal <- cfa(weak_openn_ideal, data = df_sbsa3_wide_pers %>% filter(group=="Group 1"), mimic = "mplus", missing="ML")
summary(fit_weak_openn_ideal, fit.measures = TRUE)

# Strong invariance model (additional constraints on manifest intercepts)
strong_openn_ideal <- '
# Define the latent factors
openn_ideal1 =~ NA*openn_ideal_par1_t1 + lambda1*openn_ideal_par1_t1 + lambda2*openn_ideal_par2_t1 + lambda3*openn_ideal_par3_t1
openn_ideal2 =~ NA*openn_ideal_par1_t2 + lambda1*openn_ideal_par1_t2 + lambda2*openn_ideal_par2_t2 + lambda3*openn_ideal_par3_t2

# Intercepts
openn_ideal_par1_t1 ~ i1*1
openn_ideal_par2_t1 ~ i2*1
openn_ideal_par3_t1 ~ i3*1

openn_ideal_par1_t2 ~ i1*1
openn_ideal_par2_t2 ~ i2*1
openn_ideal_par3_t2 ~ i3*1

# Unique Variances
openn_ideal_par1_t1 ~~ openn_ideal_par1_t1
openn_ideal_par2_t1 ~~ openn_ideal_par2_t1
openn_ideal_par3_t1 ~~ openn_ideal_par3_t1

openn_ideal_par1_t2 ~~ openn_ideal_par1_t2
openn_ideal_par2_t2 ~~ openn_ideal_par2_t2
openn_ideal_par3_t2 ~~ openn_ideal_par3_t2

# Latent Variable Means
openn_ideal1 ~ 0*1
openn_ideal2 ~ 1

# Latent Variable Variances and Covariance
openn_ideal1 ~~ 1*openn_ideal1
openn_ideal2 ~~ openn_ideal2
openn_ideal1 ~~ openn_ideal2
'
fit_strong_openn_ideal <- cfa(strong_openn_ideal, data = df_sbsa3_wide_pers %>% filter(group=="Group 1"), mimic = "mplus", missing="ML")
summary(fit_strong_openn_ideal, fit.measures = TRUE)

# Strict invariance model (additional constraints on manifest variances)
strict_openn_ideal <- '
# Define the latent factors
openn_ideal1 =~ NA*openn_ideal_par1_t1 + lambda1*openn_ideal_par1_t1 + lambda2*openn_ideal_par2_t1 + lambda3*openn_ideal_par3_t1
openn_ideal2 =~ NA*openn_ideal_par1_t2 + lambda1*openn_ideal_par1_t2 + lambda2*openn_ideal_par2_t2 + lambda3*openn_ideal_par3_t2

# Intercepts
openn_ideal_par1_t1 ~ i1*1
openn_ideal_par2_t1 ~ i2*1
openn_ideal_par3_t1 ~ i3*1

openn_ideal_par1_t2 ~ i1*1
openn_ideal_par2_t2 ~ i2*1
openn_ideal_par3_t2 ~ i3*1

# Unique Variances
openn_ideal_par1_t1 ~~ u1*openn_ideal_par1_t1
openn_ideal_par2_t1 ~~ u2*openn_ideal_par2_t1
openn_ideal_par3_t1 ~~ u3*openn_ideal_par3_t1

openn_ideal_par1_t2 ~~ u1*openn_ideal_par1_t2
openn_ideal_par2_t2 ~~ u2*openn_ideal_par2_t2
openn_ideal_par3_t2 ~~ u3*openn_ideal_par3_t2

# Latent Variable Means
openn_ideal1 ~ 0*1
openn_ideal2 ~ 1

# Latent Variable Variances and Covariance
openn_ideal1 ~~ 1*openn_ideal1
openn_ideal2 ~~ openn_ideal2
openn_ideal1 ~~ openn_ideal2
'
fit_strict_openn_ideal <- cfa(strict_openn_ideal, data = df_sbsa3_wide_pers %>% filter(group=="Group 1"), mimic = "mplus", missing="ML")
summary(fit_strict_openn_ideal, fit.measures = TRUE)

Results summary:

# compare model fit
bind_rows(broom::glance(fit_configural_openn_ideal) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_weak_openn_ideal) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_strong_openn_ideal) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr),
          broom::glance(fit_strict_openn_ideal) %>% 
            select(nobs, npar, chisq, AIC, BIC, cfi, tli, rmsea, srmr)) %>% 
  mutate(model = c("configural", "weak", "strong", "strict")) %>% 
  select(model, everything())
# A tibble: 4 × 10
  model       nobs  npar chisq   AIC   BIC   cfi   tli  rmsea   srmr
  <chr>      <int> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>  <dbl>  <dbl>
1 configural   175    19  35.9 1696. 1756. 0.922 0.853 0.141  0.0487
2 weak         175    17  37.6 1693. 1747. 0.923 0.884 0.126  0.0705
3 strong       175    15  38.2 1690. 1738. 0.927 0.908 0.112  0.0658
4 strict       175    12  40.4 1686. 1724. 0.929 0.929 0.0984 0.0786
# chi-square difference test for nested models 
anova(fit_configural_openn_ideal, fit_weak_openn_ideal)

Chi-Squared Difference Test

                           Df    AIC    BIC  Chisq Chisq diff RMSEA Df diff
fit_configural_openn_ideal  8 1695.8 1755.9 35.920                         
fit_weak_openn_ideal       10 1693.4 1747.2 37.597     1.6775     0       2
                           Pr(>Chisq)
fit_configural_openn_ideal           
fit_weak_openn_ideal           0.4323
anova(fit_weak_openn_ideal, fit_strong_openn_ideal)

Chi-Squared Difference Test

                       Df    AIC    BIC  Chisq Chisq diff RMSEA Df diff
fit_weak_openn_ideal   10 1693.4 1747.2 37.597                         
fit_strong_openn_ideal 12 1690.0 1737.5 38.211    0.61394     0       2
                       Pr(>Chisq)
fit_weak_openn_ideal             
fit_strong_openn_ideal     0.7357
anova(fit_strong_openn_ideal, fit_strict_openn_ideal)

Chi-Squared Difference Test

                       Df    AIC    BIC  Chisq Chisq diff RMSEA Df diff
fit_strong_openn_ideal 12 1690.0 1737.5 38.211                         
fit_strict_openn_ideal 15 1686.2 1724.2 40.411     2.1997     0       3
                       Pr(>Chisq)
fit_strong_openn_ideal           
fit_strict_openn_ideal      0.532

Model fit satisfactory (except for RMSEA) across all stages of measurement invariance. Chi^2 tests indicate that strict measurement invariance is given.


5 Descriptives

5.1 Sample size

How many participants at each time point and in each condition?

df_sbsa3 %>% summarise(n_distinct(pid)) # N (after exclusions)
# A tibble: 1 × 1
  `n_distinct(pid)`
              <int>
1               530
df_sbsa3 %>% group_by(time, group) %>% tally()
# A tibble: 6 × 3
# Groups:   time [2]
   time group       n
  <dbl> <chr>   <int>
1     1 Group 1   174
2     1 Group 2   179
3     1 Group 3   176
4     2 Group 1   155
5     2 Group 2   152
6     2 Group 3   159

One in Group 1 only has T2 data.

5.2 Demographics

Gender and age distribution

# across all participants
df_sbsa3 %>% group_by(pid) %>% slice_head(n=1) %>% ungroup() %>% 
  mutate(gender_num = ifelse(gender=="Female", 1, 0)) %>% 
  summarise(n = n(), women = sum(gender_num), m_age = mean(age, na.rm=T), sd_age = sd(age, na.rm=T), min_age = min(age, na.rm=T), max_age = max(age, na.rm=T)) %>% kable(digits = 2)
n women m_age sd_age min_age max_age
530 265 31.02 9.59 19 70
# by group and time point
df_sbsa3 %>% group_by(time, group) %>% 
  summarise(n = n(), m_age = mean(age, na.rm=T), sd_age = sd(age, na.rm=T), min_age = min(age, na.rm=T), max_age = max(age, na.rm=T)) %>% kable(digits = 2)
time group n m_age sd_age min_age max_age
1 Group 1 174 31.46 9.54 19 66
1 Group 2 179 30.68 9.00 20 62
1 Group 3 176 30.93 10.26 19 70
2 Group 1 155 31.75 9.52 20 66
2 Group 2 152 31.32 9.23 20 62
2 Group 3 159 30.70 10.24 19 70
df_sbsa3 %>% group_by(time, group, gender) %>% 
  summarise(n = n(), m_age = mean(age, na.rm=T), sd_age = sd(age, na.rm=T), min_age = min(age, na.rm=T), max_age = max(age, na.rm=T)) %>% kable(digits = 2)
time group gender n m_age sd_age min_age max_age
1 Group 1 Female 88 32.11 10.17 21 66
1 Group 1 Male 86 30.79 8.86 19 66
1 Group 2 Female 90 30.84 8.08 20 52
1 Group 2 Male 89 30.52 9.87 20 62
1 Group 3 Female 87 29.91 9.82 21 66
1 Group 3 Male 89 31.92 10.64 19 70
2 Group 1 Female 81 32.20 10.09 21 66
2 Group 1 Male 74 31.26 8.90 20 66
2 Group 2 Female 81 30.89 8.03 20 52
2 Group 2 Male 71 31.80 10.45 20 62
2 Group 3 Female 81 29.83 9.91 21 66
2 Group 3 Male 78 31.62 10.56 19 70

Other demographics (from Prolific info):

# across all participants: ethnicity
df_sbsa3 %>% group_by(pid) %>% slice_head(n=1) %>% ungroup() %>% 
  group_by(ethnicity) %>% summarise(n = n()) %>% ungroup() %>% mutate(prop = round(n / sum(n) * 100, 3)) %>% 
  kable(digits = 1)
ethnicity n prop
Asian 17 3.2
Black 116 21.9
Mixed 35 6.6
Other 23 4.3
White 336 63.4
NA 3 0.6
# across all participants: country of residence
df_sbsa3 %>% group_by(pid) %>% slice_head(n=1) %>% ungroup() %>% 
  group_by(country) %>% summarise(n = n()) %>% ungroup() %>% 
  arrange(desc(n)) %>% mutate(prop = round(n / sum(n) * 100, 3)) %>% print(n=20) %>% kable(digits = 1)
# A tibble: 32 × 3
   country            n   prop
   <chr>          <int>  <dbl>
 1 South Africa     138 26.0  
 2 Portugal          89 16.8  
 3 Poland            69 13.0  
 4 Italy             41  7.74 
 5 Mexico            30  5.66 
 6 United Kingdom    24  4.53 
 7 Greece            19  3.58 
 8 Spain             17  3.21 
 9 Chile             16  3.02 
10 Hungary           13  2.45 
11 Canada             8  1.51 
12 Australia          6  1.13 
13 France             6  1.13 
14 Germany            6  1.13 
15 Ireland            6  1.13 
16 Finland            5  0.943
17 Latvia             5  0.943
18 Netherlands        5  0.943
19 Belgium            3  0.566
20 Israel             3  0.566
# ℹ 12 more rows
country n prop
South Africa 138 26.0
Portugal 89 16.8
Poland 69 13.0
Italy 41 7.7
Mexico 30 5.7
United Kingdom 24 4.5
Greece 19 3.6
Spain 17 3.2
Chile 16 3.0
Hungary 13 2.5
Canada 8 1.5
Australia 6 1.1
France 6 1.1
Germany 6 1.1
Ireland 6 1.1
Finland 5 0.9
Latvia 5 0.9
Netherlands 5 0.9
Belgium 3 0.6
Israel 3 0.6
Slovenia 3 0.6
Switzerland 3 0.6
Austria 2 0.4
Czech Republic 2 0.4
Estonia 2 0.4
Norway 2 0.4
Sweden 2 0.4
Iceland 1 0.2
India 1 0.2
Korea 1 0.2
New Zealand 1 0.2
NA 1 0.2
# across all participants: student status
df_sbsa3 %>% group_by(pid) %>% slice_head(n=1) %>% ungroup() %>% 
  group_by(student) %>% summarise(n = n()) %>% ungroup() %>% 
  arrange(desc(n)) %>% mutate(prop = round(n / sum(n) * 100, 3)) %>% kable(digits = 1)
student n prop
No 280 52.8
Yes 163 30.8
NA 87 16.4
# across all participants: employment status
df_sbsa3 %>% group_by(pid) %>% slice_head(n=1) %>% ungroup() %>% 
  group_by(employed) %>% summarise(n = n()) %>% ungroup() %>% 
  arrange(desc(n)) %>% mutate(prop = round(n / sum(n) * 100, 3)) %>% kable(digits = 1)
employed n prop
Full-Time 213 40.2
NA 96 18.1
Unemployed (and job seeking) 87 16.4
Part-Time 68 12.8
Other 33 6.2
Not in paid work (e.g. homemaker’, ’retired or disabled) 22 4.2
Due to start a new job within the next month 11 2.1

5.3 Internal consistencies

On the trait/dimensional level:

# created in study 3 cleaning script 
base::load("data/int_consist_traits_st3.rda")
kable(int_consist_traits_st3, digits = 3)
trait rel_alpha_current rel_alpha_ideal rel_omega_t_current rel_omega_t_ideal rel_omega_h_current rel_omega_h_ideal
lifesat 0.874 NA 0.895 NA 0.828 NA
meaning 0.918 NA 0.932 NA 0.875 NA
search 0.922 NA 0.942 NA 0.890 NA
selfes 0.909 NA 0.936 NA 0.833 NA
concept 0.893 NA 0.914 NA 0.860 NA
extra 0.862 0.730 0.893 0.773 0.688 0.480
agree 0.789 0.741 0.842 0.806 0.584 0.595
consc 0.871 0.732 0.896 0.796 0.683 0.579
neuro 0.913 0.749 0.928 0.803 0.757 0.562
openn 0.814 0.745 0.856 0.808 0.691 0.602

McDonald’s omega only really makes sense for the Big Five dimensions (with their three facets each) but I’ve added it for the well-being measures as well. Keep in mind that a few of the omega computations for the well-being measures only converged with error messages. These constructs were not neccessarily intended to be tested in this multidimensional manner for internal consistency.

On the facet level:

# created in study 3 cleaning script 
base::load("data/int_consist_facets_st3.rda")
kable(int_consist_facets_st3, digits = 3)
facet rel_alpha_current rel_alpha_ideal
socia 0.830 0.621
asser 0.748 0.546
energ 0.737 0.423
compa 0.544 0.386
respe 0.686 0.616
trust 0.677 0.612
organ 0.835 0.519
produ 0.778 0.622
respo 0.635 0.407
anxie 0.791 0.402
depre 0.836 0.508
volat 0.827 0.578
curio 0.635 0.534
aesth 0.646 0.415
imagi 0.730 0.618

5.4 Attrition analyses

Do participants who only took part in T1 differ from those taking part in T1 and T2?

Determine groups of “remainers” vs. “drop-outs” and run analyses:

Show the code
attrition_sample <- df_sbsa3 %>% 
  group_by(pid) %>% mutate(last_time = max(time)) %>% ungroup() %>% 
  filter(time==1) %>% 
  select(pid, last_time, 
         contains("_comb_"), c("swls", "meaning", "selfes", "concept"), # outcomes
         gender, age, ethnicity, student, employed) %>% # demographics
  mutate(remained = last_time - 1,
         female = if_else(gender=="Female", 1, 0, NA_real_),
         white = if_else(ethnicity=="White", 1, 0, NA_real_),
         black = if_else(ethnicity=="Black", 1, 0, NA_real_),
         student = if_else(student=="Yes", 1, 0, NA_real_),
         fulltime = if_else(employed=="Full-Time", 1, 0, NA_real_),
         parttime = if_else(employed=="Part-Time", 1, 0, NA_real_),
         notemployed = if_else(employed=="Unemployed (and job seeking)" | 
                                 employed=="Not in paid work (e.g. homemaker', 'retired or disabled)" | 
                                 employed=="Due to start a new job within the next month", 1, 0, NA_real_),) %>% 
  select(-c(last_time, na_comb_curr, na_comb_ideal, gender, ethnicity, employed))

attrition_analysis <- colnames(attrition_sample) %>%                                # Start with all column names
    setdiff( c("pid", "remained" ) ) %>%                 # ...that are not Sepal.Length
    rlang::syms() %>%                             # Convert them to symbols
    map( ~rlang::expr(lm(!!.x ~ remained,
                         data=attrition_sample)) ) %>%        # Create expressions
    map( eval.parent ) %>%                        # Evaluate expressions
    map( broom::tidy ) %>%                        # Tidy up the output
    bind_rows() %>%                               # Combine into a single data frame
    filter( term != "(Intercept)" ) %>%                # Drop all (Intercept) entries
    mutate(outcome = colnames(attrition_sample) %>% setdiff( c("pid", "remained" ) ))

Display significant differences:

attrition_analysis %>% filter(p.value < .05) %>% arrange(desc(abs(estimate)))
# A tibble: 4 × 6
  term     estimate std.error statistic p.value outcome        
  <chr>       <dbl>     <dbl>     <dbl>   <dbl> <chr>          
1 remained    0.230    0.0957      2.41 0.0164  compa_comb_curr
2 remained    0.179    0.0663      2.70 0.00724 female         
3 remained    0.142    0.0549      2.59 0.00985 black          
4 remained   -0.127    0.0640     -1.98 0.0482  white          
effectsize::cohens_d(compa_comb_curr ~ remained, data = attrition_sample)
Cohen's d |         95% CI
--------------------------
-0.32     | [-0.58, -0.06]

- Estimated using pooled SD.
effectsize::phi(table(attrition_sample$female, attrition_sample$remained), alternative = "two.sided")
Phi (adj.) |       95% CI
-------------------------
0.11       | [0.00, 0.20]
effectsize::phi(table(attrition_sample$black, attrition_sample$remained), alternative = "two.sided")
Phi (adj.) |       95% CI
-------------------------
0.10       | [0.00, 0.19]
effectsize::phi(table(attrition_sample$white, attrition_sample$remained), alternative = "two.sided")
Phi (adj.) |       95% CI
-------------------------
0.07       | [0.00, 0.17]

Participants who dropped out at T2 were less compassionate, d = -0.32, 95% CI [-0.58; -0.06], p = .016, less likely to be female, phi = .11, 95% CI [0; .20], p = .007, and Black, phi = .10, 95% CI [0; .19], p = .001, and more likely to be White, phi = .07, 95% CI [0; .17], p = .048.

Show all results:

attrition_analysis %>% arrange(desc(abs(estimate))) %>% print(n=200)
# A tibble: 52 × 6
   term      estimate std.error statistic p.value outcome         
   <chr>        <dbl>     <dbl>     <dbl>   <dbl> <chr>           
 1 remained  1.94        1.28    1.52     0.129   age             
 2 remained  0.285       0.198   1.44     0.150   meaning         
 3 remained  0.230       0.0957  2.41     0.0164  compa_comb_curr 
 4 remained -0.212       0.146  -1.46     0.147   anxie_comb_ideal
 5 remained  0.193       0.134   1.44     0.152   produ_comb_ideal
 6 remained  0.192       0.162   1.19     0.237   compa_comb_ideal
 7 remained  0.191       0.161   1.18     0.240   asser_comb_ideal
 8 remained -0.186       0.142  -1.31     0.192   volat_comb_ideal
 9 remained  0.179       0.0663  2.70     0.00724 female          
10 remained -0.177       0.113  -1.57     0.119   neuro_comb_ideal
11 remained  0.172       0.162   1.06     0.290   curio_comb_ideal
12 remained  0.171       0.112   1.53     0.127   aesth_comb_curr 
13 remained  0.164       0.109   1.50     0.135   consc_comb_ideal
14 remained  0.160       0.140   1.14     0.255   organ_comb_ideal
15 remained  0.159       0.117   1.37     0.173   concept         
16 remained -0.155       0.127  -1.22     0.222   anxie_comb_curr 
17 remained -0.148       0.138  -1.07     0.285   socia_comb_curr 
18 remained  0.146       0.0784  1.86     0.0628  agree_comb_curr 
19 remained  0.142       0.0549  2.59     0.00985 black           
20 remained  0.140       0.119   1.17     0.241   produ_comb_curr 
21 remained  0.137       0.139   0.985    0.326   respo_comb_ideal
22 remained  0.134       0.131   1.02     0.307   openn_comb_ideal
23 remained -0.134       0.137  -0.973    0.331   volat_comb_curr 
24 remained  0.133       0.0855  1.56     0.120   openn_comb_curr 
25 remained -0.133       0.128  -1.04     0.301   depre_comb_ideal
26 remained -0.130       0.0666 -1.96     0.0509  notemployed     
27 remained  0.129       0.172   0.750    0.454   imagi_comb_ideal
28 remained -0.129       0.178  -0.728    0.468   socia_comb_ideal
29 remained -0.127       0.0640 -1.98     0.0482  white           
30 remained  0.125       0.0919  1.36     0.175   respe_comb_curr 
31 remained  0.119       0.109   1.09     0.277   imagi_comb_curr 
32 remained -0.117       0.118  -0.987    0.324   neuro_comb_curr 
33 remained -0.110       0.120  -0.917    0.359   asser_comb_curr 
34 remained  0.109       0.0958  1.14     0.254   curio_comb_curr 
35 remained  0.102       0.164   0.618    0.537   aesth_comb_ideal
36 remained  0.101       0.134   0.751    0.453   swls            
37 remained  0.0853      0.121   0.703    0.483   agree_comb_ideal
38 remained  0.0834      0.109   0.764    0.445   trust_comb_curr 
39 remained -0.0672      0.101  -0.662    0.508   extra_comb_curr 
40 remained -0.0617      0.136  -0.454    0.650   depre_comb_curr 
41 remained  0.0601      0.118   0.508    0.612   selfes          
42 remained  0.0583      0.129   0.451    0.653   respe_comb_ideal
43 remained  0.0556      0.116   0.478    0.633   energ_comb_curr 
44 remained  0.0515      0.101   0.511    0.610   consc_comb_curr 
45 remained  0.0447      0.0543  0.822    0.412   parttime        
46 remained  0.0438      0.0747  0.586    0.558   fulltime        
47 remained  0.0365      0.143   0.256    0.798   energ_comb_ideal
48 remained  0.0326      0.125   0.261    0.795   extra_comb_ideal
49 remained  0.0143      0.101   0.141    0.888   respo_comb_curr 
50 remained -0.00952     0.0738 -0.129    0.897   student         
51 remained  0.00552     0.174   0.0317   0.975   trust_comb_ideal
52 remained  0.000109    0.136   0.000802 0.999   organ_comb_curr 

5.5 Descriptive plots: mean-level changes

5.5.1 Personality

Prepare data frame for plotting:

Show the code
df_manip_check_forplot <- df_sbsa3 %>% 
  mutate(time_d = time - 1) %>% 
  select(pid, time_d, group, contains("_comb_"), -c(na_comb_curr, na_comb_ideal)) %>% 
  pivot_longer(-c(pid, time_d, group), 
               cols_vary = "slowest",
               names_to = c("trait", "reference"),
               names_pattern = "(.*)_(.*)",
               values_to = "score") %>% 
  mutate(trait = gsub("_comb", "", trait),
         reference = ifelse(reference=="curr", "current", reference)) %>% 
  left_join(tibble(long = str_to_title(names(b5_vars)), trait = str_trunc(names(b5_vars), 5, ellipsis = ""))) %>% 
  select(-trait, trait = long, time = time_d) %>% 
  filter(!is.na(score)) %>% 
  group_by(trait, reference, group, time) %>% 
  summarise(n = n(), m_trait = mean(score), sd_trait = sd(score)) %>% 
  ungroup() %>% 
  mutate(Group = factor(group, levels = c("Group 1", "Group 2", "Group 3"), labels = c("Group 1", "Group 2", "Group 3")),
         Time = factor(time+1),
         Trait = factor(trait, 
                        levels = str_to_title(names(b5_vars))[c(1,6:8, 2,9:11, 3,12:14, 4,15:17, 5,18:20)], # group facets with each trait
                        labels = str_to_title(names(b5_vars))[c(1,6:8, 2,9:11, 3,12:14, 4,15:17, 5,18:20)]),
         Reference = factor(reference, levels = c("current", "ideal"), labels = c("Current Personality", "Ideal Personality")),
         lwr95 = m_trait - qnorm(0.975)*sd_trait/sqrt(n),
         upr95 = m_trait + qnorm(0.975)*sd_trait/sqrt(n)) %>% 
  select(-c(time, group, trait, reference))

Table of all outcomes:

Show the code
df_manip_check_forplot %>% arrange(Trait) %>% 
  mutate_if(is.numeric, round, digits=2) %>% 
  select(Trait, Reference, Group, Time, n, m_trait, lwr95, upr95, sd_trait) %>% 
  kable(digits = 2)
Trait Reference Group Time n m_trait lwr95 upr95 sd_trait
Extraversion Current Personality Group 1 1 174 2.83 2.72 2.94 0.75
Extraversion Current Personality Group 1 2 155 2.89 2.77 3.01 0.75
Extraversion Current Personality Group 2 1 179 2.92 2.81 3.03 0.76
Extraversion Current Personality Group 2 2 152 3.00 2.89 3.12 0.74
Extraversion Current Personality Group 3 1 176 3.07 2.96 3.18 0.76
Extraversion Current Personality Group 3 2 159 3.03 2.91 3.15 0.80
Extraversion Ideal Personality Group 1 1 174 3.92 3.84 4.00 0.52
Extraversion Ideal Personality Group 1 2 155 3.82 3.74 3.90 0.51
Sociability Current Personality Group 1 1 174 2.46 2.30 2.61 1.04
Sociability Current Personality Group 1 2 155 2.57 2.41 2.73 1.01
Sociability Current Personality Group 2 1 179 2.63 2.49 2.78 1.00
Sociability Current Personality Group 2 2 152 2.75 2.61 2.90 0.93
Sociability Current Personality Group 3 1 176 2.70 2.54 2.85 1.06
Sociability Current Personality Group 3 2 159 2.70 2.53 2.86 1.07
Sociability Ideal Personality Group 1 1 174 3.65 3.54 3.76 0.75
Sociability Ideal Personality Group 1 2 155 3.51 3.40 3.63 0.73
Assertiveness Current Personality Group 1 1 174 2.92 2.79 3.05 0.89
Assertiveness Current Personality Group 1 2 155 2.92 2.78 3.06 0.89
Assertiveness Current Personality Group 2 1 179 2.97 2.83 3.10 0.91
Assertiveness Current Personality Group 2 2 152 2.99 2.85 3.12 0.87
Assertiveness Current Personality Group 3 1 176 3.17 3.05 3.30 0.87
Assertiveness Current Personality Group 3 2 159 3.11 2.96 3.26 0.95
Assertiveness Ideal Personality Group 1 1 174 3.89 3.79 3.99 0.68
Assertiveness Ideal Personality Group 1 2 155 3.75 3.64 3.86 0.67
Energy Current Personality Group 1 1 174 3.12 2.99 3.24 0.84
Energy Current Personality Group 1 2 155 3.19 3.05 3.32 0.87
Energy Current Personality Group 2 1 179 3.16 3.03 3.29 0.87
Energy Current Personality Group 2 2 152 3.27 3.14 3.41 0.87
Energy Current Personality Group 3 1 176 3.34 3.20 3.47 0.89
Energy Current Personality Group 3 2 159 3.28 3.14 3.43 0.93
Energy Ideal Personality Group 1 1 174 4.22 4.13 4.31 0.60
Energy Ideal Personality Group 1 2 155 4.19 4.10 4.28 0.57
Agreeableness Current Personality Group 1 1 174 3.69 3.61 3.77 0.56
Agreeableness Current Personality Group 1 2 155 3.76 3.67 3.85 0.55
Agreeableness Current Personality Group 2 1 179 3.62 3.53 3.70 0.59
Agreeableness Current Personality Group 2 2 152 3.68 3.59 3.78 0.60
Agreeableness Current Personality Group 3 1 176 3.69 3.60 3.78 0.62
Agreeableness Current Personality Group 3 2 159 3.68 3.58 3.78 0.64
Agreeableness Ideal Personality Group 1 1 174 4.07 4.00 4.15 0.51
Agreeableness Ideal Personality Group 1 2 155 4.05 3.97 4.14 0.54
Compassion Current Personality Group 1 1 174 3.80 3.70 3.90 0.67
Compassion Current Personality Group 1 2 155 3.94 3.84 4.05 0.67
Compassion Current Personality Group 2 1 179 3.78 3.67 3.89 0.72
Compassion Current Personality Group 2 2 152 3.78 3.66 3.90 0.76
Compassion Current Personality Group 3 1 176 3.81 3.69 3.92 0.77
Compassion Current Personality Group 3 2 159 3.80 3.68 3.93 0.80
Compassion Ideal Personality Group 1 1 174 4.02 3.92 4.12 0.68
Compassion Ideal Personality Group 1 2 155 4.05 3.94 4.16 0.69
Respectfulness Current Personality Group 1 1 174 4.06 3.96 4.17 0.71
Respectfulness Current Personality Group 1 2 155 4.09 3.98 4.21 0.71
Respectfulness Current Personality Group 2 1 179 3.96 3.86 4.05 0.65
Respectfulness Current Personality Group 2 2 152 4.03 3.93 4.13 0.64
Respectfulness Current Personality Group 3 1 176 4.01 3.90 4.11 0.71
Respectfulness Current Personality Group 3 2 159 4.04 3.92 4.15 0.72
Respectfulness Ideal Personality Group 1 1 174 4.46 4.38 4.54 0.54
Respectfulness Ideal Personality Group 1 2 155 4.41 4.32 4.51 0.60
Trust Current Personality Group 1 1 174 3.21 3.09 3.33 0.81
Trust Current Personality Group 1 2 155 3.25 3.13 3.37 0.77
Trust Current Personality Group 2 1 179 3.11 2.99 3.23 0.80
Trust Current Personality Group 2 2 152 3.24 3.11 3.37 0.81
Trust Current Personality Group 3 1 176 3.25 3.13 3.38 0.84
Trust Current Personality Group 3 2 159 3.21 3.07 3.34 0.87
Trust Ideal Personality Group 1 1 174 3.73 3.62 3.84 0.73
Trust Ideal Personality Group 1 2 155 3.69 3.58 3.81 0.72
Conscientiousness Current Personality Group 1 1 174 3.33 3.22 3.44 0.76
Conscientiousness Current Personality Group 1 2 155 3.42 3.31 3.54 0.74
Conscientiousness Current Personality Group 2 1 179 3.36 3.26 3.47 0.72
Conscientiousness Current Personality Group 2 2 152 3.45 3.34 3.56 0.69
Conscientiousness Current Personality Group 3 1 176 3.54 3.43 3.66 0.77
Conscientiousness Current Personality Group 3 2 159 3.58 3.46 3.70 0.76
Conscientiousness Ideal Personality Group 1 1 174 4.39 4.33 4.46 0.46
Conscientiousness Ideal Personality Group 1 2 155 4.41 4.34 4.47 0.42
Organization Current Personality Group 1 1 174 3.41 3.25 3.56 1.05
Organization Current Personality Group 1 2 155 3.48 3.32 3.64 1.00
Organization Current Personality Group 2 1 179 3.45 3.30 3.59 1.00
Organization Current Personality Group 2 2 152 3.50 3.35 3.65 0.95
Organization Current Personality Group 3 1 176 3.68 3.54 3.83 0.99
Organization Current Personality Group 3 2 159 3.63 3.47 3.78 0.98
Organization Ideal Personality Group 1 1 174 4.42 4.33 4.50 0.59
Organization Ideal Personality Group 1 2 155 4.43 4.35 4.51 0.52
Productiveness Current Personality Group 1 1 174 3.03 2.89 3.16 0.93
Productiveness Current Personality Group 1 2 155 3.17 3.03 3.31 0.90
Productiveness Current Personality Group 2 1 179 3.16 3.04 3.28 0.82
Productiveness Current Personality Group 2 2 152 3.29 3.16 3.43 0.84
Productiveness Current Personality Group 3 1 176 3.35 3.21 3.48 0.91
Productiveness Current Personality Group 3 2 159 3.49 3.35 3.63 0.90
Productiveness Ideal Personality Group 1 1 174 4.51 4.42 4.59 0.57
Productiveness Ideal Personality Group 1 2 155 4.50 4.42 4.58 0.51
Responsibility Current Personality Group 1 1 174 3.56 3.45 3.67 0.77
Responsibility Current Personality Group 1 2 155 3.62 3.50 3.74 0.78
Responsibility Current Personality Group 2 1 179 3.49 3.37 3.60 0.77
Responsibility Current Personality Group 2 2 152 3.56 3.45 3.67 0.71
Responsibility Current Personality Group 3 1 176 3.60 3.49 3.71 0.74
Responsibility Current Personality Group 3 2 159 3.62 3.51 3.73 0.72
Responsibility Ideal Personality Group 1 1 174 4.26 4.17 4.35 0.58
Responsibility Ideal Personality Group 1 2 155 4.29 4.19 4.38 0.60
Neuroticism Current Personality Group 1 1 174 3.19 3.06 3.32 0.86
Neuroticism Current Personality Group 1 2 155 3.13 2.99 3.26 0.85
Neuroticism Current Personality Group 2 1 179 3.26 3.14 3.39 0.85
Neuroticism Current Personality Group 2 2 152 3.10 2.97 3.24 0.85
Neuroticism Current Personality Group 3 1 176 3.03 2.89 3.17 0.94
Neuroticism Current Personality Group 3 2 159 3.04 2.89 3.19 0.96
Neuroticism Ideal Personality Group 1 1 174 1.80 1.73 1.87 0.48
Neuroticism Ideal Personality Group 1 2 155 1.82 1.75 1.90 0.48
Anxiety Current Personality Group 1 1 174 3.53 3.39 3.67 0.94
Anxiety Current Personality Group 1 2 155 3.50 3.36 3.65 0.91
Anxiety Current Personality Group 2 1 179 3.60 3.47 3.73 0.90
Anxiety Current Personality Group 2 2 152 3.50 3.34 3.66 0.98
Anxiety Current Personality Group 3 1 176 3.41 3.26 3.56 1.00
Anxiety Current Personality Group 3 2 159 3.42 3.27 3.58 1.00
Anxiety Ideal Personality Group 1 1 174 1.90 1.81 1.99 0.62
Anxiety Ideal Personality Group 1 2 155 1.97 1.87 2.07 0.62
Depression Current Personality Group 1 1 174 3.09 2.94 3.24 1.00
Depression Current Personality Group 1 2 155 3.01 2.86 3.17 1.00
Depression Current Personality Group 2 1 179 3.18 3.03 3.32 0.98
Depression Current Personality Group 2 2 152 2.94 2.79 3.10 0.99
Depression Current Personality Group 3 1 176 2.83 2.67 2.98 1.05
Depression Current Personality Group 3 2 159 2.86 2.69 3.03 1.09
Depression Ideal Personality Group 1 1 174 1.51 1.43 1.59 0.54
Depression Ideal Personality Group 1 2 155 1.54 1.45 1.62 0.55
Volatility Current Personality Group 1 1 174 2.94 2.79 3.09 1.00
Volatility Current Personality Group 1 2 155 2.87 2.71 3.02 1.00
Volatility Current Personality Group 2 1 179 3.01 2.86 3.16 1.03
Volatility Current Personality Group 2 2 152 2.87 2.72 3.02 0.95
Volatility Current Personality Group 3 1 176 2.86 2.70 3.01 1.06
Volatility Current Personality Group 3 2 159 2.83 2.67 3.00 1.07
Volatility Ideal Personality Group 1 1 174 1.99 1.90 2.07 0.60
Volatility Ideal Personality Group 1 2 155 1.96 1.87 2.05 0.58
Openness Current Personality Group 1 1 174 3.74 3.64 3.83 0.61
Openness Current Personality Group 1 2 155 3.77 3.68 3.86 0.60
Openness Current Personality Group 2 1 179 3.70 3.61 3.80 0.67
Openness Current Personality Group 2 2 152 3.77 3.66 3.87 0.66
Openness Current Personality Group 3 1 176 3.83 3.74 3.93 0.64
Openness Current Personality Group 3 2 159 3.84 3.74 3.94 0.63
Openness Ideal Personality Group 1 1 174 4.12 4.04 4.20 0.55
Openness Ideal Personality Group 1 2 155 4.14 4.05 4.23 0.54
Curiosity Current Personality Group 1 1 174 3.88 3.77 3.98 0.69
Curiosity Current Personality Group 1 2 155 3.93 3.82 4.04 0.70
Curiosity Current Personality Group 2 1 179 3.88 3.77 3.99 0.74
Curiosity Current Personality Group 2 2 152 3.97 3.84 4.09 0.76
Curiosity Current Personality Group 3 1 176 4.01 3.90 4.11 0.72
Curiosity Current Personality Group 3 2 159 4.01 3.91 4.12 0.68
Curiosity Ideal Personality Group 1 1 174 4.15 4.05 4.25 0.68
Curiosity Ideal Personality Group 1 2 155 4.13 4.03 4.23 0.64
Aesthetic Current Personality Group 1 1 174 3.60 3.48 3.72 0.80
Aesthetic Current Personality Group 1 2 155 3.64 3.51 3.76 0.82
Aesthetic Current Personality Group 2 1 179 3.60 3.47 3.73 0.86
Aesthetic Current Personality Group 2 2 152 3.63 3.50 3.76 0.83
Aesthetic Current Personality Group 3 1 176 3.72 3.59 3.85 0.86
Aesthetic Current Personality Group 3 2 159 3.72 3.59 3.85 0.85
Aesthetic Ideal Personality Group 1 1 174 3.86 3.76 3.97 0.69
Aesthetic Ideal Personality Group 1 2 155 3.95 3.84 4.06 0.70
Imagination Current Personality Group 1 1 174 3.74 3.62 3.85 0.76
Imagination Current Personality Group 1 2 155 3.75 3.63 3.87 0.75
Imagination Current Personality Group 2 1 179 3.63 3.51 3.76 0.86
Imagination Current Personality Group 2 2 152 3.71 3.58 3.85 0.84
Imagination Current Personality Group 3 1 176 3.77 3.65 3.90 0.82
Imagination Current Personality Group 3 2 159 3.79 3.67 3.92 0.80
Imagination Ideal Personality Group 1 1 174 4.34 4.23 4.45 0.72
Imagination Ideal Personality Group 1 2 155 4.34 4.24 4.45 0.66

Plotting mean-level changes on a descriptive level: Big Five dimensions and facets

Show the code
ggplot(df_manip_check_forplot, 
       aes(y = m_trait, x = Time, shape = Group, color = Reference)) + 
  geom_point(position = position_dodge(0.3)) +
  scale_shape_manual(values=c(17, 19, 15)) + 
  geom_errorbar(aes(ymin = lwr95, ymax = upr95), width=.3, position=position_dodge(0.3)) +
  ylab("Mean Trait Level (95% CI)") + 
  facet_wrap( ~ Trait, ncol = 4) +
  theme_bw() +
  theme(panel.grid.minor = element_blank(),
        strip.background=element_rect(fill="grey93"))

Plotting mean-level changes on a descriptive level: Extraversion

Show the code
ggplot(df_manip_check_forplot %>% filter(Trait=="Extraversion"), 
       aes(y = m_trait, x = Time, shape = Group, color = Reference)) + 
  geom_point(position = position_dodge(0.3)) +
  scale_shape_manual(values=c(17, 19, 15)) + 
  geom_errorbar(aes(ymin = lwr95, ymax = upr95), width=.3, position=position_dodge(0.3)) +
  ylab("Mean Trait Level (95% CI)") + 
  theme_bw() +
  theme(panel.grid.minor = element_blank(),
        strip.background=element_rect(fill="grey93"))

Plotting mean-level changes on a descriptive level: Agreeableness

Show the code
ggplot(df_manip_check_forplot %>% filter(Trait=="Agreeableness"), 
       aes(y = m_trait, x = Time, shape = Group, color = Reference)) + 
  geom_point(position = position_dodge(0.3)) +
  scale_shape_manual(values=c(17, 19, 15)) + 
  geom_errorbar(aes(ymin = lwr95, ymax = upr95), width=.3, position=position_dodge(0.3)) +
  ylab("Mean Trait Level (95% CI)") + 
  theme_bw() +
  theme(panel.grid.minor = element_blank(),
        strip.background=element_rect(fill="grey93"))

Plotting mean-level changes on a descriptive level: Conscientiousness

Show the code
ggplot(df_manip_check_forplot %>% filter(Trait=="Conscientiousness"), 
       aes(y = m_trait, x = Time, shape = Group, color = Reference)) + 
  geom_point(position = position_dodge(0.3)) +
  scale_shape_manual(values=c(17, 19, 15)) + 
  geom_errorbar(aes(ymin = lwr95, ymax = upr95), width=.3, position=position_dodge(0.3)) +
  ylab("Mean Trait Level (95% CI)") + 
  theme_bw() +
  theme(panel.grid.minor = element_blank(),
        strip.background=element_rect(fill="grey93"))

Plotting mean-level changes on a descriptive level: Neuroticism

Show the code
ggplot(df_manip_check_forplot %>% filter(Trait=="Neuroticism"), 
       aes(y = m_trait, x = Time, shape = Group, color = Reference)) + 
  geom_point(position = position_dodge(0.3)) +
  scale_shape_manual(values=c(17, 19, 15)) + 
  geom_errorbar(aes(ymin = lwr95, ymax = upr95), width=.3, position=position_dodge(0.3)) +
  ylab("Mean Trait Level (95% CI)") + 
  theme_bw() +
  theme(panel.grid.minor = element_blank(),
        strip.background=element_rect(fill="grey93"))

Plotting mean-level changes on a descriptive level: Openness

Show the code
ggplot(df_manip_check_forplot %>% filter(Trait=="Openness"), 
       aes(y = m_trait, x = Time, shape = Group, color = Reference)) + 
  geom_point(position = position_dodge(0.3)) +
  scale_shape_manual(values=c(17, 19, 15)) + 
  geom_errorbar(aes(ymin = lwr95, ymax = upr95), width=.3, position=position_dodge(0.3)) +
  ylab("Mean Trait Level (95% CI)") + 
  theme_bw() +
  theme(panel.grid.minor = element_blank(),
        strip.background=element_rect(fill="grey93"))

5.5.2 Well-Being

Prepare data frame for plotting:

Show the code
df_manip_check_wb_forplot <- df_sbsa3 %>% 
  mutate(time_d = time - 1) %>% 
  select(pid, time_d, group, swls, meaning, search, selfes, concept) %>% 
  pivot_longer(-c(pid, time_d, group), 
               cols_vary = "slowest",
               names_to = c("trait"),
               values_to = "score") %>% 
  mutate(trait = case_when(trait=="swls" ~ "Life Satisfaction", trait=="meaning" ~ "Meaning in Life", 
                           trait=="search" ~ "Search for Meaning", 
                           trait=="selfes" ~ "Self-Esteem", trait=="concept" ~ "Self-Concept Clarity")) %>% 
  filter(!is.na(score)) %>% 
  group_by(trait, group, time_d) %>% 
  summarise(n = n(), m_trait = mean(score), sd_trait = sd(score)) %>% 
  ungroup() %>% 
  mutate(Group = factor(group, levels = c("Group 1", "Group 2", "Group 3"), labels = c("Group 1", "Group 2", "Group 3")),
         Time = factor(time_d+1),
         Trait = factor(trait, 
                        levels = c("Life Satisfaction", "Meaning in Life", "Search for Meaning", 
                                   "Self-Esteem", "Self-Concept Clarity"), # group facets with each trait
                        labels = c("Life Satisfaction", "Meaning in Life", "Search for Meaning", 
                                   "Self-Esteem", "Self-Concept Clarity")),
         lwr95 = m_trait - qnorm(0.975)*sd_trait/sqrt(n),
         upr95 = m_trait + qnorm(0.975)*sd_trait/sqrt(n)) %>% 
  select(-c(time_d, group, trait))

Table of all outcomes:

Show the code
df_manip_check_wb_forplot %>% arrange(Trait) %>% 
  mutate_if(is.numeric, round, digits=2) %>% 
  select(Trait, Group, Time, n, m_trait, lwr95, upr95, sd_trait) %>% 
  kable(digits = 2)
Trait Group Time n m_trait lwr95 upr95 sd_trait
Life Satisfaction Group 1 1 174 2.75 2.60 2.90 1.01
Life Satisfaction Group 1 2 155 2.94 2.79 3.10 0.97
Life Satisfaction Group 2 1 179 2.62 2.48 2.77 0.99
Life Satisfaction Group 2 2 151 2.87 2.71 3.03 1.00
Life Satisfaction Group 3 1 176 2.94 2.79 3.09 1.00
Life Satisfaction Group 3 2 158 2.95 2.79 3.11 1.02
Meaning in Life Group 1 1 174 4.44 4.22 4.65 1.45
Meaning in Life Group 1 2 155 4.56 4.32 4.80 1.51
Meaning in Life Group 2 1 179 4.18 3.96 4.39 1.47
Meaning in Life Group 2 2 151 4.28 4.05 4.51 1.46
Meaning in Life Group 3 1 176 4.38 4.15 4.60 1.53
Meaning in Life Group 3 2 158 4.43 4.18 4.68 1.59
Search for Meaning Group 1 1 174 4.92 4.70 5.14 1.46
Search for Meaning Group 1 2 155 4.74 4.51 4.98 1.49
Search for Meaning Group 2 1 179 4.99 4.79 5.19 1.35
Search for Meaning Group 2 2 151 4.82 4.58 5.05 1.46
Search for Meaning Group 3 1 176 4.84 4.63 5.06 1.46
Search for Meaning Group 3 2 158 4.63 4.38 4.87 1.57
Self-Esteem Group 1 1 174 3.33 3.20 3.46 0.88
Self-Esteem Group 1 2 155 3.44 3.30 3.57 0.85
Self-Esteem Group 2 1 179 3.14 3.02 3.26 0.83
Self-Esteem Group 2 2 151 3.33 3.19 3.47 0.88
Self-Esteem Group 3 1 176 3.44 3.30 3.58 0.93
Self-Esteem Group 3 2 158 3.45 3.30 3.61 0.99
Self-Concept Clarity Group 1 1 174 3.10 2.97 3.23 0.88
Self-Concept Clarity Group 1 2 155 3.29 3.15 3.42 0.88
Self-Concept Clarity Group 2 1 179 3.03 2.91 3.15 0.83
Self-Concept Clarity Group 2 2 151 3.13 3.00 3.26 0.83
Self-Concept Clarity Group 3 1 176 3.16 3.03 3.30 0.91
Self-Concept Clarity Group 3 2 158 3.24 3.10 3.38 0.91

Plotting mean-level changes on a descriptive level: all four well-being aspects

Show the code
ggplot(df_manip_check_wb_forplot %>% filter(Trait!="Search for Meaning"), 
       aes(y = m_trait, x = Time, shape = Group)) + 
  geom_point(position = position_dodge(0.3)) +
  scale_shape_manual(values=c(17, 19, 15)) + 
  geom_errorbar(aes(ymin = lwr95, ymax = upr95), width=.3, position=position_dodge(0.3)) +
  ylab("Mean Well-Being Level (95% CI)") + 
  facet_wrap( ~ Trait, ncol = 2) +
  theme_bw() +
  theme(panel.grid.minor = element_blank(),
        strip.background=element_rect(fill="grey93"))

5.6 Standard deviations (for later standardization)

Personality

sd_pers_st3 <- df_sbsa3 %>% 
  select(pid, time, group, contains("_comb_"), -c(na_comb_curr, na_comb_ideal)) %>% 
  pivot_longer(-c(pid, time, group), 
               cols_vary = "slowest",
               names_to = c("trait", "reference"),
               names_pattern = "(.*)_(.*)",
               values_to = "score") %>% 
  mutate(trait = gsub("_comb", "", trait),
         reference = ifelse(reference=="curr", "current", reference)) %>% 
  left_join(tibble(long = str_to_title(names(b5_vars)), trait = str_trunc(names(b5_vars), 5, ellipsis = ""))) %>% 
  filter(!is.na(score)) %>% 
  group_by(long, trait, reference, time) %>% 
  summarise(n = n(), m_trait = mean(score), sd_trait = sd(score)) %>% 
  ungroup()

Well-being

sd_wb_st3 <- df_sbsa3 %>% 
  select(pid, time, group, swls, meaning, search, selfes, concept) %>% 
  pivot_longer(-c(pid, time, group), 
               cols_vary = "slowest",
               names_to = c("trait"),
               values_to = "score") %>% 
  mutate(long = case_when(trait=="swls" ~ "Life Satisfaction", trait=="meaning" ~ "Meaning in Life", 
                          trait=="search" ~ "Search for Meaning", 
                           trait=="selfes" ~ "Self-Esteem", trait=="concept" ~ "Self-Concept Clarity")) %>% 
  filter(!is.na(score)) %>% 
  group_by(long, trait, time) %>% 
  summarise(n = n(), m_trait = mean(score), sd_trait = sd(score)) %>% 
  ungroup()

6 Confirmatory results

For Study 3, most of these moedls are only possible in Group 1 which answered both current and ideal personality trait items.

I only left the heading “confirmatory” here for better comparability with Study 1 and Study 2 analyses. The preregistered, confirmatory analyses for Study 3 are found below in section 7!

6.1 Well-being - similarity correlations (H4 in paper)

All four psychological well-being indicators will be positively correlated with a greater similarity between current- and ideal personality-ratings of personality.

To examine this at the level of overall profiles, we will compute the correlations between the psychological well-being indicators and the Fisher z transformed correlations between the facet- and item-level real-ideal personality-profiles. To examine this at the level of individual traits, we will compute the correlation between psychological well-being indicators and the squared difference between current- and ideal personality rating for each Big Five trait and facet.

In Study 3, we can only examine this in Group 3 because the other groups only received current personality items.

6.1.1 Profile similarity

Computations:

Show the code
cormat_profile <- cor(df_sbsa3[, c("swls", "meaning", "search", "selfes", "concept",
                                  "profile_corr_item_z", "profile_corr_facet_z")], use = "pairwise.complete.obs")

rownames(cormat_profile) <- c("Life Satisfaction", "Meaning in Life", "Search for Meaning", 
                              "Self-Esteem" , "Self Concept Clarity", 
                              "Item-Level Profile Corr (z)", "Facet-Level Profile Corr (z)")
colnames(cormat_profile) <- c("Life Satisfaction", "Meaning in Life", "Search for Meaning", 
                              "Self-Esteem" , "Self Concept Clarity", 
                              "Item-Level Profile Corr (z)", "Facet-Level Profile Corr (z)")
corrplot(cormat_profile, type = "lower", order = "original", tl.col = "black", tl.srt = 10,
                      addCoef.col ='black', number.cex = 0.7, diag = FALSE) # also add numbers

Positive correlations of well-being indicators with profile similarity between current personality and ideal personality personality. Especially high correlation with self-esteem. High congruence of item-level and facet-level profile similarity.

As a table with confidence intervals - profile similarity:

Show the code
bind_rows(
  bind_cols(
  cormat_profile[6, c(1:5)] %>% as_tibble() %>% 
    mutate(wellbeing = c("swls", "meaning", "search", "selfes", "concept"),
           profile_sim = rep("item-level", 5)) %>% 
    select(profile_sim, wellbeing, corr = value),
  correlation::cor_to_ci(cormat_profile[6, c(1:5)], n = (df_sbsa3 %>% filter(!is.na(swls)) 
                                        %>% tally() %>% pull())) %>% as_tibble(),
  correlation::cor_to_p(cormat_profile[6, c(1:5)], n = (df_sbsa3 %>% filter(!is.na(swls)) 
                                      %>% tally() %>% pull())) %>% as_tibble() %>% select(p) 
  ),
bind_cols(
  cormat_profile[7, c(1:5)] %>% as_tibble() %>% 
    mutate(wellbeing = c("swls", "meaning", "search", "selfes", "concept"),
           profile_sim = rep("facet-level", 5)) %>% 
    select(profile_sim, wellbeing, corr = value),
  correlation::cor_to_ci(cormat_profile[7, c(1:5)], n = (df_sbsa3 %>% filter(!is.na(swls)) 
                                        %>% tally() %>% pull())) %>% as_tibble(),
  correlation::cor_to_p(cormat_profile[7, c(1:5)], n = (df_sbsa3 %>% filter(!is.na(swls)) 
                                      %>% tally() %>% pull())) %>% as_tibble() %>% select(p)
  )
) %>% kable(digits = 3)
profile_sim wellbeing corr CI_low CI_high p
item-level swls 0.435 0.383 0.484 0
item-level meaning 0.505 0.457 0.550 0
item-level search -0.114 -0.175 -0.052 0
item-level selfes 0.602 0.561 0.641 0
item-level concept 0.581 0.538 0.620 0
facet-level swls 0.449 0.398 0.498 0
facet-level meaning 0.506 0.458 0.550 0
facet-level search -0.159 -0.219 -0.098 0
facet-level selfes 0.640 0.602 0.676 0
facet-level concept 0.614 0.573 0.651 0
6.1.1.1 Change in this relation over time and across intervention groups

Computations:

Show the code
# T1
cormat_profile_gr1_t1 <- cor((df_sbsa3 %>% filter(group=="Group 1" & time==1))[, c("swls", "meaning", "search", "selfes", "concept",                                  "profile_corr_item_z", "profile_corr_facet_z")], use = "pairwise.complete.obs")
rownames(cormat_profile_gr1_t1) <- c("Life Satisfaction", "Meaning in Life", "Search for Meaning", "Self-Esteem" , "Self Concept Clarity", 
                              "Item-Level Profile Corr (z)", "Facet-Level Profile Corr (z)")
colnames(cormat_profile_gr1_t1) <- c("Life Satisfaction", "Meaning in Life", "Search for Meaning", "Self-Esteem" , "Self Concept Clarity", 
                              "Item-Level Profile Corr (z)", "Facet-Level Profile Corr (z)")

# T2
cormat_profile_gr1_t2 <- cor((df_sbsa3 %>% filter(group=="Group 1" & time==2))[, c("swls", "meaning", "search", "selfes", "concept",                                  "profile_corr_item_z", "profile_corr_facet_z")], use = "pairwise.complete.obs")
rownames(cormat_profile_gr1_t2) <- c("Life Satisfaction", "Meaning in Life", "Search for Meaning", "Self-Esteem" , "Self Concept Clarity", 
                              "Item-Level Profile Corr (z)", "Facet-Level Profile Corr (z)")
colnames(cormat_profile_gr1_t2) <- c("Life Satisfaction", "Meaning in Life", "Search for Meaning", "Self-Esteem" , "Self Concept Clarity", 
                              "Item-Level Profile Corr (z)", "Facet-Level Profile Corr (z)")

T1

First assessment

corrplot(cormat_profile_gr1_t1, type = "lower", order = "original", tl.col = "black", tl.srt = 10,
                      addCoef.col ='black', number.cex = 0.7, diag = FALSE) # also add numbers

T2
Second assessment

corrplot(cormat_profile_gr1_t2, type = "lower", order = "original", tl.col = "black", tl.srt = 10,
                      addCoef.col ='black', number.cex = 0.7, diag = FALSE) # also add numbers

No pronounced differences across time.

6.1.2 Individual traits: squared differences

Computations:

Show the code
cormat_sqtraits <- cor(df_sbsa3[, c("swls", "meaning", "search", "selfes", "concept",
                                   paste0(str_trunc(names(b5_vars)[1:5], 5, ellipsis = ""), "_sqdiff"))], 
                       use = "pairwise.complete.obs")

rownames(cormat_sqtraits) <- c("Life Satisfaction", "Meaning in Life", "Search for Meaning", "Self-Esteem" , "Self Concept Clarity", 
                               str_to_title(names(b5_vars)[1:5]))
colnames(cormat_sqtraits) <- c("Life Satisfaction", "Meaning in Life", "Search for Meaning", "Self-Esteem" , "Self Concept Clarity", 
                               str_to_title(names(b5_vars)[1:5]))

cormat_sqfacets <- cor(df_sbsa3[, c("swls", "meaning", "search", "selfes", "concept",
                                   paste0(str_trunc(names(b5_vars)[6:20], 5, ellipsis = ""), "_sqdiff"))], 
                       use = "pairwise.complete.obs")

rownames(cormat_sqfacets) <- c("Life Satisfaction", "Meaning in Life", "Search for Meaning", "Self-Esteem" , "Self Concept Clarity", 
                               str_to_title(names(b5_vars)[6:20]))
colnames(cormat_sqfacets) <- c("Life Satisfaction", "Meaning in Life", "Search for Meaning", "Self-Esteem" , "Self Concept Clarity", 
                               str_to_title(names(b5_vars)[6:20]))

Big Five traits

corrplot(cormat_sqtraits, type = "lower", order = "original", tl.col = "black", tl.srt = 10,
         addCoef.col ='black', number.cex = 0.7, diag = FALSE) # also add numbers

As a table with confidence intervals - squared differences:

Show the code
cormat_sqtraits[c(6:10), c(1:5)] %>% as_tibble() %>% 
    mutate(personality = names(b5_vars)[1:5]) %>% 
    rename(swls = `Life Satisfaction`, meaning = `Meaning in Life`, search = `Search for Meaning`,
           selfes = `Self-Esteem`, concept = `Self Concept Clarity`) %>% 
    pivot_longer(1:5, names_to = "wellbeing", values_to = "corr") %>% 
    mutate(CI_low = correlation::cor_to_ci(corr, n = (df_sbsa3 %>% filter(!is.na(swls)) 
                                        %>% tally() %>% pull())) %>% 
             as_tibble() %>% pull(CI_low),
           CI_high = correlation::cor_to_ci(corr, n = (df_sbsa3 %>% filter(!is.na(swls)) 
                                        %>% tally() %>% pull())) %>% 
             as_tibble() %>% pull(CI_high),
           p = correlation::cor_to_p(corr, n = (df_sbsa3 %>% filter(!is.na(swls)) 
                                        %>% tally() %>% pull())) %>% 
             as_tibble() %>% pull(p)) %>% 
  kable(digits = 3)
personality wellbeing corr CI_low CI_high p
extraversion swls -0.256 -0.314 -0.197 0.000
extraversion meaning -0.411 -0.462 -0.358 0.000
extraversion search 0.095 0.033 0.156 0.003
extraversion selfes -0.388 -0.440 -0.334 0.000
extraversion concept -0.310 -0.365 -0.252 0.000
agreeableness swls -0.213 -0.272 -0.153 0.000
agreeableness meaning -0.151 -0.211 -0.090 0.000
agreeableness search 0.132 0.070 0.192 0.000
agreeableness selfes -0.241 -0.299 -0.181 0.000
agreeableness concept -0.289 -0.345 -0.231 0.000
conscientiousness swls -0.296 -0.351 -0.238 0.000
conscientiousness meaning -0.126 -0.187 -0.064 0.000
conscientiousness search -0.034 -0.096 0.029 0.288
conscientiousness selfes -0.291 -0.347 -0.233 0.000
conscientiousness concept -0.217 -0.275 -0.157 0.000
neuroticism swls -0.399 -0.450 -0.346 0.000
neuroticism meaning -0.370 -0.422 -0.315 0.000
neuroticism search 0.226 0.166 0.284 0.000
neuroticism selfes -0.497 -0.543 -0.449 0.000
neuroticism concept -0.442 -0.490 -0.390 0.000
openness swls -0.083 -0.144 -0.020 0.009
openness meaning -0.029 -0.091 0.033 0.355
openness search 0.076 0.013 0.137 0.017
openness selfes -0.115 -0.176 -0.053 0.000
openness concept -0.016 -0.079 0.046 0.606

Big Five facets

corrplot(cormat_sqfacets, type = "lower", order = "original", tl.col = "black", tl.srt = 10,
         addCoef.col ='black', number.cex = 0.6, diag = FALSE) # also add numbers

Here we see negative correlations of well-being indicators with squared trait- and facet-level mean-score differences between current personality and ideal personality personality. Especially pronounced negative correlations for neuroticism, extraversion, and conscientiousness (in that order).

6.1.2.1 Change in this relation over time and across intervention groups

Only looking at the trait level for now!

Computations:

Show the code
# T1
cormat_sqtraits_gr1_t1 <- cor((df_sbsa3 %>% filter(group=="Group 1" & time==1))[, c("swls", "meaning", "search", "selfes", "concept",
                                   paste0(str_trunc(names(b5_vars)[1:5], 5, ellipsis = ""), "_sqdiff"))], 
                       use = "pairwise.complete.obs")
rownames(cormat_sqtraits_gr1_t1) <- c("Life Satisfaction", "Meaning in Life", "Search for Meaning", "Self-Esteem" , "Self Concept Clarity", 
                               str_to_title(names(b5_vars)[1:5]))
colnames(cormat_sqtraits_gr1_t1) <- c("Life Satisfaction", "Meaning in Life", "Search for Meaning", "Self-Esteem" , "Self Concept Clarity", 
                               str_to_title(names(b5_vars)[1:5]))

# T2
cormat_sqtraits_gr1_t2 <- cor((df_sbsa3 %>% filter(group=="Group 1" & time==2))[, c("swls", "meaning", "search", "selfes", "concept",
                                   paste0(str_trunc(names(b5_vars)[1:5], 5, ellipsis = ""), "_sqdiff"))], 
                       use = "pairwise.complete.obs")
rownames(cormat_sqtraits_gr1_t2) <- c("Life Satisfaction", "Meaning in Life", "Search for Meaning", "Self-Esteem" , "Self Concept Clarity", 
                               str_to_title(names(b5_vars)[1:5]))
colnames(cormat_sqtraits_gr1_t2) <- c("Life Satisfaction", "Meaning in Life", "Search for Meaning", "Self-Esteem" , "Self Concept Clarity", 
                               str_to_title(names(b5_vars)[1:5]))

T1

First assessment:

corrplot(cormat_sqtraits_gr1_t1, type = "lower", order = "original", tl.col = "black", tl.srt = 10,
                      addCoef.col ='black', number.cex = 0.7, diag = FALSE) # also add numbers

T2
Second assessment:

corrplot(cormat_sqtraits_gr1_t2, type = "lower", order = "original", tl.col = "black", tl.srt = 10,
                      addCoef.col ='black', number.cex = 0.7, diag = FALSE) # also add numbers

At T2, slightly lower correlations for openness. Relatively similar results across time, otherwise.

6.1.2.2 Exploratory: Controlling for initial trait levels

Additional, exploratory analysis: Are the relationships of the squared trait differences with well-being similar when we control for initial trait levels (at T1)?

Example with extraversion:

cor(df_sbsa3 %>% filter(group=="Group 1" & time==1) %>% pull(swls), 
    df_sbsa3 %>% filter(group=="Group 1" & time==1) %>% pull(extra_sqdiff), 
    use = "pairwise.complete.obs") %>% round(2)
[1] -0.27
# basic model to replicate the correlation
mod_ex_1 <- lm(swls ~ extra_sqdiff, data = df_sbsa3 %>% filter(group=="Group 1" & time==1))
summary(mod_ex_1)$coef
              Estimate Std. Error   t value     Pr(>|t|)
(Intercept)   2.989693 0.09815233 30.459725 3.331613e-71
extra_sqdiff -0.135079 0.03689506 -3.661167 3.338466e-04
effectsize::standardize_parameters(mod_ex_1)
# Standardization method: refit

Parameter    | Std. Coef. |         95% CI
------------------------------------------
(Intercept)  |   3.23e-16 | [-0.14,  0.14]
extra_sqdiff |      -0.27 | [-0.41, -0.12]
# adjusted model controlling for initial trait level
mod_ex_2 <- lm(swls ~ extra_sqdiff + extra_comb_curr, 
               data = df_sbsa3 %>% filter(group=="Group 1" & time==1))
summary(mod_ex_2)$coef
                   Estimate Std. Error   t value     Pr(>|t|)
(Intercept)      1.47215535 0.43334305  3.397205 0.0008467750
extra_sqdiff    -0.01786883 0.04837127 -0.369410 0.7122791641
extra_comb_curr  0.46323156 0.12906599  3.589106 0.0004331415
effectsize::standardize_parameters(mod_ex_2)
# Standardization method: refit

Parameter       | Std. Coef. |        95% CI
--------------------------------------------
(Intercept)     |   3.42e-16 | [-0.14, 0.14]
extra_sqdiff    |      -0.04 | [-0.23, 0.15]
extra_comb_curr |       0.35 | [ 0.16, 0.54]

Reduced correlation (effect size) in the adjusted model that is not significant, anymore (but small sample here in Study 3).

Only T1:

Show the code
# prepare df
df_check_adjusted <- df_sbsa3 %>% 
  # reshape with well-being outcomes and current trait levels
  filter(group=="Group 1" & time==1) %>% 
  select(pid, group, 
         swls, meaning, selfes, concept,
         paste0(str_trunc(names(b5_vars), 5, ellipsis = ""), "_comb_curr")) %>% 
  pivot_longer(ends_with("_comb_curr"), 
               names_to = "test", names_prefix = "facet", values_to = "initial", values_drop_na = TRUE) %>% 
  mutate(test = sub("_.*", "", test)) %>% 
  left_join(
    df_sbsa3 %>% 
    # reshape with squared differences -> add to previous
      filter(group=="Group 1" & time==1) %>% 
      select(pid, group, ends_with("_sqdiff")) %>% 
      pivot_longer(ends_with("_sqdiff"), 
                   names_to = "test", names_prefix = "facet", values_to = "score", values_drop_na = TRUE) %>% 
      mutate(test = sub("_.*", "", test))
  )

# run models 
# swls
df_check_adjusted_swls <- df_check_adjusted %>% 
  group_nest(test) %>% 
  mutate(lm_mods = map(data, ~lm(score ~ swls + initial, data = .x))) %>% 
  pull(lm_mods) %>% 
  purrr::set_names(sort(names(b5_vars))) # ordered alphabetically

df_check_adjusted_swls_unlist <- as.data.frame(summary(df_check_adjusted_swls[[1]])$coefficients) %>% as_tibble() %>% 
  mutate(d = effectsize::standardize_parameters(df_check_adjusted_swls[[1]])$Std_Coefficient) # standardized beta
for (i in 2:length(df_check_adjusted_swls)) {
  df_check_adjusted_swls_unlist <- 
    bind_rows(df_check_adjusted_swls_unlist, 
              as.data.frame(summary(df_check_adjusted_swls[[i]])$coefficients) %>% as_tibble() %>% 
                mutate(d = effectsize::standardize_parameters(df_check_adjusted_swls[[i]])$Std_Coefficient))
}
df_check_adjusted_swls_unlist <- df_check_adjusted_swls_unlist %>% 
        mutate(outcome = rep(sort(names(b5_vars)), each=3), 
               term = c(rep(c("Intercept", "swls", "initial"), 20))) %>% 
        rename(p = `Pr(>|t|)`, std_err = `Std. Error`, est = Estimate) %>% select(outcome, term, est, d, std_err, p) %>% 
        mutate(sig = ifelse(p < .05, ifelse(p < .01, ifelse(p < .001, "***", "**"), "*"), " ")) %>% 
        arrange(factor(outcome, levels = (names(b5_vars))))

# meaning
df_check_adjusted_meaning <- df_check_adjusted %>% 
  group_nest(test) %>% 
  mutate(lm_mods = map(data, ~lm(score ~ meaning + initial, data = .x))) %>% 
  pull(lm_mods) %>% 
  purrr::set_names(sort(names(b5_vars))) # ordered alphabetically

df_check_adjusted_meaning_unlist <- as.data.frame(summary(df_check_adjusted_meaning[[1]])$coefficients) %>% as_tibble() %>% 
  mutate(d = effectsize::standardize_parameters(df_check_adjusted_meaning[[1]])$Std_Coefficient) # standardized beta
for (i in 2:length(df_check_adjusted_meaning)) {
  df_check_adjusted_meaning_unlist <- 
    bind_rows(df_check_adjusted_meaning_unlist, 
              as.data.frame(summary(df_check_adjusted_meaning[[i]])$coefficients) %>% as_tibble() %>% 
                mutate(d = effectsize::standardize_parameters(df_check_adjusted_meaning[[i]])$Std_Coefficient))
}
df_check_adjusted_meaning_unlist <- df_check_adjusted_meaning_unlist %>% 
        mutate(outcome = rep(sort(names(b5_vars)), each=3), 
               term = c(rep(c("Intercept", "meaning", "initial"), 20))) %>% 
        rename(p = `Pr(>|t|)`, std_err = `Std. Error`, est = Estimate) %>% select(outcome, term, est, d, std_err, p) %>% 
        mutate(sig = ifelse(p < .05, ifelse(p < .01, ifelse(p < .001, "***", "**"), "*"), " ")) %>% 
        arrange(factor(outcome, levels = (names(b5_vars))))

# selfes
df_check_adjusted_selfes <- df_check_adjusted %>% 
  group_nest(test) %>% 
  mutate(lm_mods = map(data, ~lm(score ~ selfes + initial, data = .x))) %>% 
  pull(lm_mods) %>% 
  purrr::set_names(sort(names(b5_vars))) # ordered alphabetically

df_check_adjusted_selfes_unlist <- as.data.frame(summary(df_check_adjusted_selfes[[1]])$coefficients) %>% as_tibble() %>% 
  mutate(d = effectsize::standardize_parameters(df_check_adjusted_selfes[[1]])$Std_Coefficient) # standardized beta
for (i in 2:length(df_check_adjusted_selfes)) {
  df_check_adjusted_selfes_unlist <- 
    bind_rows(df_check_adjusted_selfes_unlist, 
              as.data.frame(summary(df_check_adjusted_selfes[[i]])$coefficients) %>% as_tibble() %>% 
                mutate(d = effectsize::standardize_parameters(df_check_adjusted_selfes[[i]])$Std_Coefficient))
}
df_check_adjusted_selfes_unlist <- df_check_adjusted_selfes_unlist %>% 
        mutate(outcome = rep(sort(names(b5_vars)), each=3), 
               term = c(rep(c("Intercept", "selfes", "initial"), 20))) %>% 
        rename(p = `Pr(>|t|)`, std_err = `Std. Error`, est = Estimate) %>% select(outcome, term, est, d, std_err, p) %>% 
        mutate(sig = ifelse(p < .05, ifelse(p < .01, ifelse(p < .001, "***", "**"), "*"), " ")) %>% 
        arrange(factor(outcome, levels = (names(b5_vars))))

# concept
df_check_adjusted_concept <- df_check_adjusted %>% 
  group_nest(test) %>% 
  mutate(lm_mods = map(data, ~lm(score ~ concept + initial, data = .x))) %>% 
  pull(lm_mods) %>% 
  purrr::set_names(sort(names(b5_vars))) # ordered alphabetically

df_check_adjusted_concept_unlist <- as.data.frame(summary(df_check_adjusted_concept[[1]])$coefficients) %>% as_tibble() %>% 
  mutate(d = effectsize::standardize_parameters(df_check_adjusted_concept[[1]])$Std_Coefficient) # standardized beta
for (i in 2:length(df_check_adjusted_concept)) {
  df_check_adjusted_concept_unlist <- 
    bind_rows(df_check_adjusted_concept_unlist, 
              as.data.frame(summary(df_check_adjusted_concept[[i]])$coefficients) %>% as_tibble() %>% 
                mutate(d = effectsize::standardize_parameters(df_check_adjusted_concept[[i]])$Std_Coefficient))
}
df_check_adjusted_concept_unlist <- df_check_adjusted_concept_unlist %>% 
        mutate(outcome = rep(sort(names(b5_vars)), each=3), 
               term = c(rep(c("Intercept", "concept", "initial"), 20))) %>% 
        rename(p = `Pr(>|t|)`, std_err = `Std. Error`, est = Estimate) %>% select(outcome, term, est, d, std_err, p) %>% 
        mutate(sig = ifelse(p < .05, ifelse(p < .01, ifelse(p < .001, "***", "**"), "*"), " ")) %>% 
        arrange(factor(outcome, levels = (names(b5_vars))))

Does the relationship between the squared difference in one trait (current vs. ideal) and well-being aspects still hold if we control for the initial trait level?

Combine as one table:

kable(df_check_adjusted_swls_unlist %>% 
        filter(!term %in% c("Intercept", "initial")) %>% 
        select(outcome, d_swls = d, p_ls = p, sig_ls = sig) %>% 
        left_join(
          df_check_adjusted_meaning_unlist %>% 
            filter(!term %in% c("Intercept", "initial")) %>% 
            select(outcome, d_meaning = d, p_m = p, sig_m = sig)
        ) %>% 
          left_join(
            df_check_adjusted_selfes_unlist %>% 
              filter(!term %in% c("Intercept", "initial")) %>% 
              select(outcome, d_selfes = d, p_se = p, sig_se = sig)
          ) %>% 
          left_join(
            df_check_adjusted_concept_unlist %>% 
              filter(!term %in% c("Intercept", "initial")) %>% 
              select(outcome, d_concept = d, p_c = p, sig_c = sig)
          ), 
       digits = 3)
outcome d_swls p_ls sig_ls d_meaning p_m sig_m d_selfes p_se sig_se d_concept p_c sig_c
extraversion -0.022 0.712 -0.069 0.291 -0.117 0.063 -0.081 0.178
agreeableness -0.110 0.114 -0.059 0.398 -0.157 0.025 * -0.149 0.035 *
conscientiousness -0.088 0.069 0.054 0.260 0.016 0.759 0.099 0.048 *
neuroticism -0.039 0.429 0.092 0.070 0.046 0.412 0.135 0.016 *
openness -0.085 0.205 0.021 0.758 -0.079 0.245 0.039 0.567
sociability -0.017 0.794 -0.093 0.142 -0.141 0.024 * -0.118 0.053
assertiveness 0.024 0.702 -0.009 0.900 -0.057 0.390 -0.059 0.357
energy -0.055 0.321 -0.037 0.558 -0.047 0.429 -0.042 0.455
compassion -0.183 0.015 * -0.137 0.068 -0.192 0.010 * -0.101 0.182
respectfulness -0.068 0.264 -0.002 0.972 -0.054 0.380 -0.071 0.255
trust -0.059 0.384 0.022 0.745 -0.099 0.142 -0.113 0.096
organization -0.116 0.015 * -0.048 0.312 -0.034 0.487 -0.034 0.485
productiveness -0.105 0.020 * 0.043 0.340 -0.010 0.831 0.089 0.056
responsibility -0.066 0.258 0.067 0.238 -0.044 0.463 0.099 0.095
anxiety -0.016 0.763 0.083 0.121 0.008 0.894 0.097 0.095
depression -0.075 0.134 0.046 0.421 0.008 0.896 0.088 0.118
volatility -0.035 0.507 0.055 0.297 0.057 0.307 0.118 0.038 *
curiosity -0.057 0.418 0.098 0.162 -0.067 0.345 -0.051 0.469
aesthetic -0.051 0.463 -0.002 0.976 -0.093 0.180 -0.008 0.903
imagination -0.096 0.137 -0.028 0.674 -0.028 0.672 0.006 0.933

Effects mostly not significant, anymore but much smaller sample size here in Study 3.

Across all time points, using a mixed model:

Show the code
# prepare df
df_check_adjusted_all <- df_sbsa3 %>% 
  # reshape with well-being outcomes and current trait levels
  filter(group=="Group 1") %>% 
  select(pid, group, time,
         swls, meaning, selfes, concept,
         paste0(str_trunc(names(b5_vars), 5, ellipsis = ""), "_sqdiff")) %>% 
  pivot_longer(ends_with("_sqdiff"), 
               names_to = "test", names_prefix = "facet", values_to = "score", values_drop_na = TRUE) %>% 
  mutate(test = sub("_.*", "", test)) %>% 
  left_join(
    df_sbsa3 %>% 
    # reshape with squared differences -> add to previous
      filter(group=="Group 1" & time==1) %>% 
      select(pid, group, ends_with("_comb_curr")) %>% 
      pivot_longer(ends_with("_comb_curr"), 
                   names_to = "test", names_prefix = "facet", values_to = "initial", values_drop_na = TRUE) %>% 
      mutate(test = sub("_.*", "", test))
  )

# run models 
# swls
df_check_adjusted_swls_all <- df_check_adjusted_all %>% 
  group_nest(test) %>% 
  mutate(lm_mods = map(data, ~lmerTest::lmer(score ~ swls + initial + (1 | pid), data = .x))) %>% # groupm intercept model
  pull(lm_mods) %>% 
  purrr::set_names(sort(names(b5_vars))) # ordered alphabetically

df_check_adjusted_swls_all_unlist <- as.data.frame(summary(df_check_adjusted_swls_all[[1]])$coefficients) %>% as_tibble() %>% 
  mutate(d = effectsize::standardize_parameters(df_check_adjusted_swls_all[[1]])$Std_Coefficient) # standardized beta
for (i in 2:length(df_check_adjusted_swls_all)) {
  df_check_adjusted_swls_all_unlist <- 
    bind_rows(df_check_adjusted_swls_all_unlist, 
              as.data.frame(summary(df_check_adjusted_swls_all[[i]])$coefficients) %>% as_tibble() %>% 
                mutate(d = effectsize::standardize_parameters(df_check_adjusted_swls_all[[i]])$Std_Coefficient))
}
df_check_adjusted_swls_all_unlist <- df_check_adjusted_swls_all_unlist %>% 
        mutate(outcome = rep(sort(names(b5_vars)), each=3), 
               term = c(rep(c("Intercept", "swls", "initial"), 20))) %>% 
        rename(p = `Pr(>|t|)`, std_err = `Std. Error`, est = Estimate) %>% select(outcome, term, est, d, std_err, p) %>% 
        mutate(sig = ifelse(p < .05, ifelse(p < .01, ifelse(p < .001, "***", "**"), "*"), " ")) %>% 
        arrange(factor(outcome, levels = (names(b5_vars))))

# meaning
df_check_adjusted_meaning_all <- df_check_adjusted_all %>% 
  group_nest(test) %>% 
  mutate(lm_mods = map(data, ~lmerTest::lmer(score ~ meaning + initial + (1 | pid), data = .x))) %>% # groupm intercept model
  pull(lm_mods) %>% 
  purrr::set_names(sort(names(b5_vars))) # ordered alphabetically

df_check_adjusted_meaning_all_unlist <- as.data.frame(summary(df_check_adjusted_meaning_all[[1]])$coefficients) %>% as_tibble() %>% 
                mutate(d = effectsize::standardize_parameters(df_check_adjusted_meaning_all[[1]])$Std_Coefficient)
for (i in 2:length(df_check_adjusted_meaning_all)) {
  df_check_adjusted_meaning_all_unlist <- 
    bind_rows(df_check_adjusted_meaning_all_unlist, 
              as.data.frame(summary(df_check_adjusted_meaning_all[[i]])$coefficients) %>% as_tibble() %>% 
                mutate(d = effectsize::standardize_parameters(df_check_adjusted_meaning_all[[i]])$Std_Coefficient))
}
df_check_adjusted_meaning_all_unlist <- df_check_adjusted_meaning_all_unlist %>% 
        mutate(outcome = rep(sort(names(b5_vars)), each=3), 
               term = c(rep(c("Intercept", "meaning", "initial"), 20))) %>% 
        rename(p = `Pr(>|t|)`, std_err = `Std. Error`, est = Estimate) %>% select(outcome, term, est, d, std_err, p) %>% 
        mutate(sig = ifelse(p < .05, ifelse(p < .01, ifelse(p < .001, "***", "**"), "*"), " ")) %>% 
        arrange(factor(outcome, levels = (names(b5_vars))))

# selfes
df_check_adjusted_selfes_all <- df_check_adjusted_all %>% 
  group_nest(test) %>% 
  mutate(lm_mods = map(data, ~lmerTest::lmer(score ~ selfes + initial + (1 | pid), data = .x))) %>% # groupm intercept model
  pull(lm_mods) %>% 
  purrr::set_names(sort(names(b5_vars))) # ordered alphabetically

df_check_adjusted_selfes_all_unlist <- as.data.frame(summary(df_check_adjusted_selfes_all[[1]])$coefficients) %>% as_tibble() %>% 
                mutate(d = effectsize::standardize_parameters(df_check_adjusted_selfes_all[[1]])$Std_Coefficient)
for (i in 2:length(df_check_adjusted_selfes_all)) {
  df_check_adjusted_selfes_all_unlist <- 
    bind_rows(df_check_adjusted_selfes_all_unlist, 
              as.data.frame(summary(df_check_adjusted_selfes_all[[i]])$coefficients) %>% as_tibble() %>% 
                mutate(d = effectsize::standardize_parameters(df_check_adjusted_selfes_all[[i]])$Std_Coefficient))
}
df_check_adjusted_selfes_all_unlist <- df_check_adjusted_selfes_all_unlist %>% 
        mutate(outcome = rep(sort(names(b5_vars)), each=3), 
               term = c(rep(c("Intercept", "selfes", "initial"), 20))) %>% 
        rename(p = `Pr(>|t|)`, std_err = `Std. Error`, est = Estimate) %>% select(outcome, term, est, d, std_err, p) %>% 
        mutate(sig = ifelse(p < .05, ifelse(p < .01, ifelse(p < .001, "***", "**"), "*"), " ")) %>% 
        arrange(factor(outcome, levels = (names(b5_vars))))

# concept
df_check_adjusted_concept_all <- df_check_adjusted_all %>% 
  group_nest(test) %>% 
  mutate(lm_mods = map(data, ~lmerTest::lmer(score ~ concept + initial + (1 | pid), data = .x))) %>% # groupm intercept model
  pull(lm_mods) %>% 
  purrr::set_names(sort(names(b5_vars))) # ordered alphabetically

df_check_adjusted_concept_all_unlist <- as.data.frame(summary(df_check_adjusted_concept_all[[1]])$coefficients) %>% as_tibble() %>% 
                mutate(d = effectsize::standardize_parameters(df_check_adjusted_concept_all[[1]])$Std_Coefficient)
for (i in 2:length(df_check_adjusted_concept_all)) {
  df_check_adjusted_concept_all_unlist <- 
    bind_rows(df_check_adjusted_concept_all_unlist, 
              as.data.frame(summary(df_check_adjusted_concept_all[[i]])$coefficients) %>% as_tibble() %>% 
                mutate(d = effectsize::standardize_parameters(df_check_adjusted_concept_all[[i]])$Std_Coefficient))
}
df_check_adjusted_concept_all_unlist <- df_check_adjusted_concept_all_unlist %>% 
        mutate(outcome = rep(sort(names(b5_vars)), each=3), 
               term = c(rep(c("Intercept", "concept", "initial"), 20))) %>% 
        rename(p = `Pr(>|t|)`, std_err = `Std. Error`, est = Estimate) %>% select(outcome, term, est, d, std_err, p) %>% 
        mutate(sig = ifelse(p < .05, ifelse(p < .01, ifelse(p < .001, "***", "**"), "*"), " ")) %>% 
        arrange(factor(outcome, levels = (names(b5_vars))))

Does the relationship between the squared difference in one trait (current vs. ideal) and well-being aspects still hold if we control for the initial trait level?

Combine as one table:

kable(df_check_adjusted_swls_all_unlist %>% 
        filter(!term %in% c("Intercept", "initial")) %>% 
        select(outcome, d_swls = d, p_ls = p, sig_ls = sig) %>% 
        left_join(
          df_check_adjusted_meaning_all_unlist %>% 
            filter(!term %in% c("Intercept", "initial")) %>% 
            select(outcome, d_meaning = d, p_m = p, sig_m = sig)
        ) %>% 
          left_join(
            df_check_adjusted_selfes_all_unlist %>% 
              filter(!term %in% c("Intercept", "initial")) %>% 
              select(outcome, d_selfes = d, p_se = p, sig_se = sig)
          ) %>% 
          left_join(
            df_check_adjusted_concept_all_unlist %>% 
              filter(!term %in% c("Intercept", "initial")) %>% 
              select(outcome, d_concept = d, p_c = p, sig_c = sig)
          ), 
       digits = 3)
outcome d_swls p_ls sig_ls d_meaning p_m sig_m d_selfes p_se sig_se d_concept p_c sig_c
extraversion -0.028 0.566 -0.070 0.182 -0.098 0.057 -0.112 0.018 *
agreeableness -0.095 0.092 -0.056 0.326 -0.106 0.064 -0.188 0.001 ***
conscientiousness -0.068 0.098 0.039 0.346 -0.006 0.890 0.013 0.749
neuroticism -0.080 0.058 -0.007 0.878 -0.066 0.168 -0.017 0.699
openness -0.058 0.284 0.017 0.752 -0.067 0.224 0.030 0.576
sociability -0.056 0.288 -0.095 0.073 -0.137 0.010 ** -0.148 0.003 **
assertiveness 0.029 0.586 -0.002 0.974 -0.066 0.226 -0.092 0.076
energy -0.048 0.316 -0.069 0.187 -0.046 0.364 -0.066 0.159
compassion -0.135 0.026 * -0.136 0.025 * -0.130 0.033 * -0.155 0.010 **
respectfulness -0.074 0.148 -0.071 0.171 -0.076 0.149 -0.161 0.002 **
trust -0.114 0.040 * -0.010 0.851 -0.120 0.032 * -0.125 0.023 *
organization -0.082 0.049 * -0.044 0.300 -0.031 0.465 -0.053 0.202
productiveness -0.143 0.001 *** -0.037 0.385 -0.079 0.078 -0.030 0.492
responsibility -0.032 0.496 0.116 0.011 * -0.036 0.449 0.027 0.548
anxiety -0.059 0.200 -0.006 0.897 -0.091 0.065 -0.041 0.393
depression -0.162 0.000 *** -0.099 0.037 * -0.129 0.015 * -0.027 0.558
volatility -0.045 0.330 -0.003 0.952 -0.012 0.806 0.014 0.780
curiosity -0.071 0.208 0.025 0.656 -0.095 0.096 -0.069 0.217
aesthetic -0.009 0.870 0.028 0.618 -0.052 0.350 0.005 0.923
imagination -0.090 0.089 -0.058 0.285 -0.049 0.369 -0.025 0.639

Effects mostly not significant, anymore, but much smaller sample size here in Study 3 (only Group 1). Exceptions: productiveness, depression & life satisfaction. Depression & meaning in life. Sociability & self-esteem.


6.2 Well-being - latent change (H5 in paper)

Both groups will increase in all four psychological well-being indicators.

We will test the mean-level difference between baseline and follow up using a latent change model.

6.2.1 Life satisfaction

Fit model:

Show the code
# Code snippets adapted from Kievit et al. (2018) -- CC-BY -- https://doi.org/10.1016/j.dcn.2017.11.007

# Fit the multiple indicator Univariate Latent Change Score model
mi_lcs_swls_hyp2 <- '
swls_t1 =~ 1*sw06_01_t1 + lamb2*sw06_02_t1 + lamb3*sw06_03_t1 + lamb4*sw06_04_t1 # This specifies the measurement model for swls_t1 
swls_t2 =~ 1*sw06_01_t2 + lamb2*sw06_02_t2 + lamb3*sw06_03_t2 + lamb4*sw06_04_t2 # This specifies the measurement model for swls_t2 with the equality constrained factor loadings

swls_t2 ~ 1*swls_t1     # This parameter regresses swls_t2 perfectly on swls_t1
d_swls_1 =~ 1*swls_t2   # This defines the latent change score factor as measured perfectly by scores on swls_t2
swls_t2 ~ 0*1           # This line constrains the intercept of swls_t2 to 0
swls_t2 ~~ 0*swls_t2    # This fixes the variance of swls_t2 to 0

d_swls_1 ~ 1           # This estimates the intercept of the change score 
swls_t1 ~ 1            # This estimates the intercept of swls_t1 
d_swls_1 ~~ d_swls_1   # This estimates the variance of the change scores 
swls_t1 ~~ swls_t1     # This estimates the variance of the swls_t1 
d_swls_1 ~~ swls_t1     # This estimates the self-feedback parameter, as a covariance! -> therefore, the interpretation of the change score remains unconditional

sw06_01_t1 ~~ sw06_01_t2   # This allows residual covariance on indicator X1 across T1 and T2
sw06_02_t1 ~~ sw06_02_t2   # This allows residual covariance on indicator X2 across T1 and T2
sw06_03_t1 ~~ sw06_03_t2   # This allows residual covariance on indicator X3 across T1 and T2
sw06_04_t1 ~~ sw06_04_t2   # This allows residual covariance on indicator X4 across T1 and T2

sw06_01_t1 ~~ res1*sw06_01_t1   # This allows residual variance on indicator X1 at T1 
sw06_02_t1 ~~ res2*sw06_02_t1   # This allows residual variance on indicator X2 at T1
sw06_03_t1 ~~ res3*sw06_03_t1   # This allows residual variance on indicator X3 at T1
sw06_04_t1 ~~ res4*sw06_04_t1   # This allows residual variance on indicator X4 at T1

sw06_01_t2 ~~ res1*sw06_01_t2  # This allows residual variance on indicator X1 at T2 
sw06_02_t2 ~~ res2*sw06_02_t2  # This allows residual variance on indicator X2 at T2 
sw06_03_t2 ~~ res3*sw06_03_t2  # This allows residual variance on indicator X3 at T2
sw06_04_t2 ~~ res4*sw06_04_t2  # This allows residual variance on indicator X4 at T2

sw06_01_t1 ~ 0*1      # This constrains the intercept of X1 to 0 at T1
sw06_02_t1 ~ m2*1     # This estimates the intercept of X2 at T1
sw06_03_t1 ~ m3*1     # This estimates the intercept of X3 at T1
sw06_04_t1 ~ m4*1     # This estimates the intercept of X4 at T1

sw06_01_t2 ~ 0*1      # This constrains the intercept of X1 to 0 at T2
sw06_02_t2 ~ m2*1     # This estimates the intercept of X2 at T2
sw06_03_t2 ~ m3*1     # This estimates the intercept of X3 at T2
sw06_04_t2 ~ m4*1     # This estimates the intercept of X4 at T2
'
fit_mi_lcs_swls_hyp2 <- lavaan(mi_lcs_swls_hyp2, data=df_sbsa3_wide_wb, estimator='mlr', fixed.x=FALSE, missing='fiml')
summary(fit_mi_lcs_swls_hyp2, fit.measures=TRUE, standardized=TRUE, rsquare=F)

Results summary:

# model fit
kable(broom::glance(fit_mi_lcs_swls_hyp2) %>% 
        select(nobs, npar, AIC, BIC, cfi, rmsea, srmr), digits = 3)
nobs npar AIC BIC cfi rmsea srmr
530 19 10229.01 10310.2 0.989 0.047 0.028
# parameters of interest
params_lcs_swls_hyp2 <- broom::tidy(fit_mi_lcs_swls_hyp2, conf.int = TRUE, conf.level = 0.99) %>% 
        select(term, estimate, conf.low, conf.high, std.all, statistic, p.value) %>% 
        filter(term %in% c("swls_t2 ~ swls_t1", "d_swls_1 =~ swls_t2", "swls_t1 ~~ d_swls_1", # change parameters
                           "d_swls_1 ~1 ", "swls_t1 ~1 ", "", # means
                           "d_swls_1 ~~ d_swls_1")) # variances
kable(params_lcs_swls_hyp2, digits = 3)
term estimate conf.low conf.high std.all statistic p.value
swls_t2 ~ swls_t1 1.000 1.000 1.000 1.006 NA NA
d_swls_1 =~ swls_t2 1.000 1.000 1.000 0.566 NA NA
d_swls_1 ~1 0.156 0.072 0.239 0.275 4.775 0
swls_t1 ~1 2.751 2.618 2.884 2.739 53.268 0
d_swls_1 ~~ d_swls_1 0.320 0.189 0.450 1.000 6.308 0
swls_t1 ~~ d_swls_1 -0.166 -0.255 -0.076 -0.292 -4.784 0

Plot model:

semPaths(fit_mi_lcs_swls_hyp2, what="est", 
         sizeLat = 7, sizeMan = 7, edge.label.cex = .75, intercepts = T, edge.color="black")

Significantly higher life satisfaction at the post test, b = 0.156, beta = 0.155, p = 0. Those with initially higher levels of life satisfaction (at T1) change to a lesser degree.

6.2.2 Meaning in life

For the 5-item subscale presence of meaning in life.

Fit model:

Show the code
# Code snippets adapted from Kievit et al. (2018) -- CC-BY -- https://doi.org/10.1016/j.dcn.2017.11.007

# Fit the multiple indicator Univariate Latent Change Score model
mi_lcs_meaning_hyp2 <- '
meaning_t1 =~ 1*ml01_01_t1 + lamb2*ml01_04_t1 + lamb3*ml01_05_t1 + lamb4*ml01_06_t1 + lamb5*ml01_09_t1 # This specifies the measurement model for meaning_t1 
meaning_t2 =~ 1*ml01_01_t2 + lamb2*ml01_04_t2 + lamb3*ml01_05_t2 + lamb4*ml01_06_t2 + lamb5*ml01_09_t2 # This specifies the measurement model for meaning_t2 with the equality constrained factor loadings

meaning_t2 ~ 1*meaning_t1     # This parameter regresses meaning_t2 perfectly on meaning_t1
d_meaning_1 =~ 1*meaning_t2   # This defines the latent change score factor as measured perfectly by scores on meaning_t2
meaning_t2 ~ 0*1              # This line constrains the intercept of meaning_t2 to 0
meaning_t2 ~~ 0*meaning_t2    # This fixes the variance of meaning_t2 to 0

d_meaning_1 ~ 1              # This estimates the intercept of the change score 
meaning_t1 ~ 1               # This estimates the intercept of meaning_t1 
d_meaning_1 ~~ d_meaning_1   # This estimates the variance of the change scores 
meaning_t1 ~~ meaning_t1     # This estimates the variance of the meaning_t1 
d_meaning_1 ~~ meaning_t1     # This estimates the self-feedback parameter, as a covariance! -> therefore, the interpretation of the change score remains unconditional

ml01_01_t1 ~~ ml01_01_t2   # This allows residual covariance on indicator X1 across T1 and T2
ml01_04_t1 ~~ ml01_04_t2   # This allows residual covariance on indicator X2 across T1 and T2
ml01_05_t1 ~~ ml01_05_t2   # This allows residual covariance on indicator X3 across T1 and T2
ml01_06_t1 ~~ ml01_06_t2   # This allows residual covariance on indicator X4 across T1 and T2
ml01_09_t1 ~~ ml01_09_t2   # This allows residual covariance on indicator X5 across T1 and T2

ml01_01_t1 ~~ res1*ml01_01_t1   # This allows residual variance on indicator X1 at T1 
ml01_04_t1 ~~ res2*ml01_04_t1   # This allows residual variance on indicator X2 at T1
ml01_05_t1 ~~ res3*ml01_05_t1   # This allows residual variance on indicator X3 at T1
ml01_06_t1 ~~ res4*ml01_06_t1   # This allows residual variance on indicator X4 at T1
ml01_09_t1 ~~ res5*ml01_09_t1   # This allows residual variance on indicator X5 at T1

ml01_01_t2 ~~ res1*ml01_01_t2  # This allows residual variance on indicator X1 at T2 
ml01_04_t2 ~~ res2*ml01_04_t2  # This allows residual variance on indicator X2 at T2 
ml01_05_t2 ~~ res3*ml01_05_t2  # This allows residual variance on indicator X3 at T2
ml01_06_t2 ~~ res4*ml01_06_t2  # This allows residual variance on indicator X4 at T2
ml01_09_t2 ~~ res5*ml01_09_t2  # This allows residual variance on indicator X5 at T2

ml01_01_t1 ~ 0*1      # This constrains the intercept of X1 to 0 at T1
ml01_04_t1 ~ m2*1     # This estimates the intercept of X2 at T1
ml01_05_t1 ~ m3*1     # This estimates the intercept of X3 at T1
ml01_06_t1 ~ m4*1     # This estimates the intercept of X4 at T1
ml01_09_t1 ~ m5*1     # This estimates the intercept of X5 at T1

ml01_01_t2 ~ 0*1      # This constrains the intercept of X1 to 0 at T2
ml01_04_t2 ~ m2*1     # This estimates the intercept of X2 at T2
ml01_05_t2 ~ m3*1     # This estimates the intercept of X3 at T2
ml01_06_t2 ~ m4*1     # This estimates the intercept of X4 at T2
ml01_09_t2 ~ m5*1     # This estimates the intercept of X5 at T2
'
fit_mi_lcs_meaning_hyp2 <- lavaan(mi_lcs_meaning_hyp2, data=df_sbsa3_wide_wb, estimator='mlr', fixed.x=FALSE, missing='fiml')
summary(fit_mi_lcs_meaning_hyp2, fit.measures=TRUE, standardized=TRUE, rsquare=F)

Results summary:

# model fit
kable(broom::glance(fit_mi_lcs_meaning_hyp2) %>% 
        select(nobs, npar, AIC, BIC, cfi, rmsea, srmr), digits = 3)
nobs npar AIC BIC cfi rmsea srmr
530 23 15410.33 15508.61 0.987 0.05 0.025
# parameters of interest
params_lcs_meaning_hyp2 <- broom::tidy(fit_mi_lcs_meaning_hyp2, conf.int = TRUE, conf.level = 0.99) %>% 
        select(term, estimate, conf.low, conf.high, std.all, statistic, p.value) %>% 
        filter(term %in% c("meaning_t2 ~ meaning_t1", "d_meaning_1 =~ meaning_t2", "meaning_t1 ~~ d_meaning_1", # change parameters
                           "d_meaning_1 ~1 ", "meaning_t1 ~1 ", "", # means
                           "d_meaning_1 ~~ d_meaning_1")) # variances

kable(params_lcs_meaning_hyp2, digits = 3)
term estimate conf.low conf.high std.all statistic p.value
meaning_t2 ~ meaning_t1 1.000 1.000 1.000 0.988 NA NA
d_meaning_1 =~ meaning_t2 1.000 1.000 1.000 0.586 NA NA
d_meaning_1 ~1 0.059 -0.056 0.174 0.072 1.330 0.183
meaning_t1 ~1 4.360 4.180 4.540 3.128 62.442 0.000
d_meaning_1 ~~ d_meaning_1 0.683 0.505 0.862 1.000 9.863 0.000
meaning_t1 ~~ d_meaning_1 -0.318 -0.486 -0.151 -0.276 -4.901 0.000

Participants did not improve significantly in meaning in life across time, b = 0.059, beta = 0.04, p = 0.183.

6.2.2.1 Search for Meaning

For the 5-item subscale search for meaning in life.

Fit model:

Show the code
# Code snippets adapted from Kievit et al. (2018) -- CC-BY -- https://doi.org/10.1016/j.dcn.2017.11.007

# Fit the multiple indicator Univariate Latent Change Score model
mi_lcs_search_hyp2 <- '
search_t1 =~ 1*ml01_02_t1 + lamb2*ml01_03_t1 + lamb3*ml01_07_t1 + lamb4*ml01_08_t1 + lamb5*ml01_10_t1 # This specifies the measurement model for search_t1 
search_t2 =~ 1*ml01_02_t2 + lamb2*ml01_03_t2 + lamb3*ml01_07_t2 + lamb4*ml01_08_t2 + lamb5*ml01_10_t2 # This specifies the measurement model for search_t2 with the equality constrained factor loadings

search_t2 ~ 1*search_t1     # This parameter regresses search_t2 perfectly on search_t1
d_search_1 =~ 1*search_t2   # This defines the latent change score factor as measured perfectly by scores on search_t2
search_t2 ~ 0*1              # This line constrains the intercept of search_t2 to 0
search_t2 ~~ 0*search_t2    # This fixes the variance of search_t2 to 0

d_search_1 ~ 1              # This estimates the intercept of the change score 
search_t1 ~ 1               # This estimates the intercept of search_t1 
d_search_1 ~~ d_search_1   # This estimates the variance of the change scores 
search_t1 ~~ search_t1     # This estimates the variance of the search_t1 
d_search_1 ~~ search_t1     # This estimates the self-feedback parameter, as a covariance! -> therefore, the interpretation of the change score remains unconditional

ml01_02_t1 ~~ ml01_02_t2   # This allows residual covariance on indicator X1 across T1 and T2
ml01_03_t1 ~~ ml01_03_t2   # This allows residual covariance on indicator X2 across T1 and T2
ml01_07_t1 ~~ ml01_07_t2   # This allows residual covariance on indicator X3 across T1 and T2
ml01_08_t1 ~~ ml01_08_t2   # This allows residual covariance on indicator X4 across T1 and T2
ml01_10_t1 ~~ ml01_10_t2   # This allows residual covariance on indicator X5 across T1 and T2

ml01_02_t1 ~~ res1*ml01_02_t1   # This allows residual variance on indicator X1 at T1 
ml01_03_t1 ~~ res2*ml01_03_t1   # This allows residual variance on indicator X2 at T1
ml01_07_t1 ~~ res3*ml01_07_t1   # This allows residual variance on indicator X3 at T1
ml01_08_t1 ~~ res4*ml01_08_t1   # This allows residual variance on indicator X4 at T1
ml01_10_t1 ~~ res5*ml01_10_t1   # This allows residual variance on indicator X5 at T1

ml01_02_t2 ~~ res1*ml01_02_t2  # This allows residual variance on indicator X1 at T2 
ml01_03_t2 ~~ res2*ml01_03_t2  # This allows residual variance on indicator X2 at T2 
ml01_07_t2 ~~ res3*ml01_07_t2  # This allows residual variance on indicator X3 at T2
ml01_08_t2 ~~ res4*ml01_08_t2  # This allows residual variance on indicator X4 at T2
ml01_10_t2 ~~ res5*ml01_10_t2  # This allows residual variance on indicator X5 at T2

ml01_02_t1 ~ 0*1      # This constrains the intercept of X1 to 0 at T1
ml01_03_t1 ~ m2*1     # This estimates the intercept of X2 at T1
ml01_07_t1 ~ m3*1     # This estimates the intercept of X3 at T1
ml01_08_t1 ~ m4*1     # This estimates the intercept of X4 at T1
ml01_10_t1 ~ m5*1     # This estimates the intercept of X5 at T1

ml01_02_t2 ~ 0*1      # This constrains the intercept of X1 to 0 at T2
ml01_03_t2 ~ m2*1     # This estimates the intercept of X2 at T2
ml01_07_t2 ~ m3*1     # This estimates the intercept of X3 at T2
ml01_08_t2 ~ m4*1     # This estimates the intercept of X4 at T2
ml01_10_t2 ~ m5*1     # This estimates the intercept of X5 at T2
'
fit_mi_lcs_search_hyp2 <- lavaan(mi_lcs_search_hyp2, data=df_sbsa3_wide_wb, estimator='mlr', fixed.x=FALSE, missing='fiml')
summary(fit_mi_lcs_search_hyp2, fit.measures=TRUE, standardized=TRUE, rsquare=F)

Results summary:

# model fit
kable(broom::glance(fit_mi_lcs_search_hyp2) %>% 
        select(nobs, npar, AIC, BIC, cfi, rmsea, srmr), digits = 3)
nobs npar AIC BIC cfi rmsea srmr
530 23 15276.35 15374.63 0.961 0.085 0.032
# parameters of interest
params_lcs_search_hyp2 <- broom::tidy(fit_mi_lcs_search_hyp2, conf.int = TRUE, conf.level = 0.99) %>% 
        select(term, estimate, conf.low, conf.high, std.all, statistic, p.value) %>% 
        filter(term %in% c("search_t2 ~ search_t1", "d_search_1 =~ search_t2", "search_t1 ~~ d_search_1", # change parameters
                           "d_search_1 ~1 ", "search_t1 ~1 ", "", # means
                           "d_search_1 ~~ d_search_1")) # variances

kable(params_lcs_search_hyp2, digits = 3)
term estimate conf.low conf.high std.all statistic p.value
search_t2 ~ search_t1 1.000 1.000 1.000 0.958 NA NA
d_search_1 =~ search_t2 1.000 1.000 1.000 0.691 NA NA
d_search_1 ~1 -0.153 -0.277 -0.028 -0.165 -3.159 0.002
search_t1 ~1 5.159 4.991 5.326 4.023 79.522 0.000
d_search_1 ~~ d_search_1 0.855 0.575 1.136 1.000 7.853 0.000
search_t1 ~~ d_search_1 -0.355 -0.552 -0.157 -0.299 -4.626 0.000

Participants decreased significantly in search for meaning in life across time, b = -0.153, beta = -0.107, p = 0.002.

6.2.3 Self-esteem

Fit model:

Show the code
# Code snippets adapted from Kievit et al. (2018) -- CC-BY -- https://doi.org/10.1016/j.dcn.2017.11.007

# Fit the multiple indicator Univariate Latent Change Score model
mi_lcs_selfes_hyp2 <- '
selfes_t1 =~ 1*selfes_par1_t1 + lamb2*selfes_par2_t1 + lamb3*selfes_par3_t1 # This specifies the measurement model for selfes_t1 
selfes_t2 =~ 1*selfes_par1_t2 + lamb2*selfes_par2_t2 + lamb3*selfes_par3_t2 # This specifies the measurement model for selfes_t2 with the equality constrained factor loadings

selfes_t2 ~ 1*selfes_t1     # This parameter regresses selfes_t2 perfectly on selfes_t1
d_selfes_1 =~ 1*selfes_t2   # This defines the latent change score factor as measured perfectly by scores on selfes_t2
selfes_t2 ~ 0*1             # This line constrains the intercept of selfes_t2 to 0
selfes_t2 ~~ 0*selfes_t2    # This fixes the variance of selfes_t2 to 0

d_selfes_1 ~ 1             # This estimates the intercept of the change score 
selfes_t1 ~ 1              # This estimates the intercept of selfes_t1 
d_selfes_1 ~~ d_selfes_1   # This estimates the variance of the change scores 
selfes_t1 ~~ selfes_t1     # This estimates the variance of the selfes_t1 
d_selfes_1 ~~ selfes_t1    # This estimates the self-feedback parameter, as a covariance! -> therefore, the interpretation of the change score remains unconditional

selfes_par1_t1 ~~ selfes_par1_t2   # This allows residual covariance on indicator X1 across T1 and T2
selfes_par2_t1 ~~ selfes_par2_t2   # This allows residual covariance on indicator X2 across T1 and T2
selfes_par3_t1 ~~ selfes_par3_t2   # This allows residual covariance on indicator X3 across T1 and T2

selfes_par1_t1 ~~ res1*selfes_par1_t1   # This allows residual variance on indicator X1 at T1 
selfes_par2_t1 ~~ res2*selfes_par2_t1   # This allows residual variance on indicator X2 at T1
selfes_par3_t1 ~~ res3*selfes_par3_t1   # This allows residual variance on indicator X3 at T1

selfes_par1_t2 ~~ res1*selfes_par1_t2  # This allows residual variance on indicator X1 at T2 
selfes_par2_t2 ~~ res2*selfes_par2_t2  # This allows residual variance on indicator X2 at T2 
selfes_par3_t2 ~~ res3*selfes_par3_t2  # This allows residual variance on indicator X3 at T2

selfes_par1_t1 ~ 0*1      # This constrains the intercept of X1 to 0 at T1
selfes_par2_t1 ~ m2*1     # This estimates the intercept of X2 at T1
selfes_par3_t1 ~ m3*1     # This estimates the intercept of X3 at T1

selfes_par1_t2 ~ 0*1      # This constrains the intercept of X1 to 0 at T2
selfes_par2_t2 ~ m2*1     # This estimates the intercept of X2 at T2
selfes_par3_t2 ~ m3*1     # This estimates the intercept of X3 at T2
'
fit_mi_lcs_selfes_hyp2 <- lavaan(mi_lcs_selfes_hyp2, data=df_sbsa3_wide_wb, estimator='mlr', fixed.x=FALSE, missing='fiml')
summary(fit_mi_lcs_selfes_hyp2, fit.measures=TRUE, standardized=TRUE, rsquare=F)

Results summary:

# model fit
kable(broom::glance(fit_mi_lcs_selfes_hyp2) %>% 
        select(nobs, npar, AIC, BIC, cfi, rmsea, srmr), digits = 3)
nobs npar AIC BIC cfi rmsea srmr
530 15 5506.131 5570.224 1 0 0.012
# parameters of interest
params_lcs_selfes_hyp2 <- broom::tidy(fit_mi_lcs_selfes_hyp2, conf.int = TRUE, conf.level = 0.99) %>% 
        select(term, estimate, conf.low, conf.high, std.all, statistic, p.value) %>% 
        filter(term %in% c("selfes_t2 ~ selfes_t1", "d_selfes_1 =~ selfes_t2", "selfes_t1 ~~ d_selfes_1", # change parameters
                           "d_selfes_1 ~1 ", "selfes_t1 ~1 ", "", # means
                           "d_selfes_1 ~~ d_selfes_1")) # variances

kable(params_lcs_selfes_hyp2, digits = 3)
term estimate conf.low conf.high std.all statistic p.value
selfes_t2 ~ selfes_t1 1.000 1.000 1.000 0.985 NA NA
d_selfes_1 =~ selfes_t2 1.000 1.000 1.000 0.505 NA NA
d_selfes_1 ~1 0.093 0.037 0.149 0.234 4.292 0
selfes_t1 ~1 3.600 3.503 3.697 4.619 95.709 0
d_selfes_1 ~~ d_selfes_1 0.160 0.107 0.213 1.000 7.840 0
selfes_t1 ~~ d_selfes_1 -0.070 -0.115 -0.026 -0.226 -4.067 0

Participants increased significantly in self-esteem between the two assessments, b = 0.093, beta = 0.105, p = 0.

6.2.4 Self concept clarity

Fit model:

Show the code
# Code snippets adapted from Kievit et al. (2018) -- CC-BY -- https://doi.org/10.1016/j.dcn.2017.11.007

# Fit the multiple indicator Univariate Latent Change Score model
mi_lcs_concept_hyp2 <- '
concept_t1 =~ 1*concept_par1_t1 + lamb2*concept_par2_t1 + lamb3*concept_par3_t1 # This specifies the measurement model for concept_t1 
concept_t2 =~ 1*concept_par1_t2 + lamb2*concept_par2_t2 + lamb3*concept_par3_t2 # This specifies the measurement model for concept_t2 with the equality constrained factor loadings

concept_t2 ~ 1*concept_t1     # This parameter regresses concept_t2 perfectly on concept_t1
d_concept_1 =~ 1*concept_t2   # This defines the latent change score factor as measured perfectly by scores on concept_t2
concept_t2 ~ 0*1              # This line constrains the intercept of concept_t2 to 0
concept_t2 ~~ 0*concept_t2    # This fixes the variance of concept_t2 to 0

d_concept_1 ~ 1              # This estimates the intercept of the change score 
concept_t1 ~ 1               # This estimates the intercept of concept_t1 
d_concept_1 ~~ d_concept_1   # This estimates the variance of the change scores 
concept_t1 ~~ concept_t1     # This estimates the variance of the concept_t1 
d_concept_1 ~~ concept_t1    # This estimates the self-feedback parameter, as a covariance! -> therefore, the interpretation of the change score remains unconditional

concept_par1_t1 ~~ concept_par1_t2   # This allows residual covariance on indicator X1 across T1 and T2
concept_par2_t1 ~~ concept_par2_t2   # This allows residual covariance on indicator X2 across T1 and T2
concept_par3_t1 ~~ concept_par3_t2   # This allows residual covariance on indicator X3 across T1 and T2

concept_par1_t1 ~~ res1*concept_par1_t1   # This allows residual variance on indicator X1 at T1 
concept_par2_t1 ~~ res2*concept_par2_t1   # This allows residual variance on indicator X2 at T1
concept_par3_t1 ~~ res3*concept_par3_t1   # This allows residual variance on indicator X3 at T1

concept_par1_t2 ~~ res1*concept_par1_t2  # This allows residual variance on indicator X1 at T2 
concept_par2_t2 ~~ res2*concept_par2_t2  # This allows residual variance on indicator X2 at T2 
concept_par3_t2 ~~ res3*concept_par3_t2  # This allows residual variance on indicator X3 at T2

concept_par1_t1 ~ 0*1      # This constrains the intercept of X1 to 0 at T1
concept_par2_t1 ~ m2*1     # This estimates the intercept of X2 at T1
concept_par3_t1 ~ m3*1     # This estimates the intercept of X3 at T1

concept_par1_t2 ~ 0*1      # This constrains the intercept of X1 to 0 at T2
concept_par2_t2 ~ m2*1     # This estimates the intercept of X2 at T2
concept_par3_t2 ~ m3*1     # This estimates the intercept of X3 at T2
'
fit_mi_lcs_concept_hyp2 <- lavaan(mi_lcs_concept_hyp2, data=df_sbsa3_wide_wb, estimator='mlr', fixed.x=FALSE, missing='fiml')
summary(fit_mi_lcs_concept_hyp2, fit.measures=TRUE, standardized=TRUE, rsquare=F)

Results summary:

# model fit
kable(broom::glance(fit_mi_lcs_concept_hyp2) %>% 
        select(nobs, npar, AIC, BIC, cfi, rmsea, srmr), digits = 3)
nobs npar AIC BIC cfi rmsea srmr
530 15 5825.619 5889.713 0.997 0.033 0.022
# parameters of interest
params_lcs_concept_hyp2 <- broom::tidy(fit_mi_lcs_concept_hyp2, conf.int = TRUE, conf.level = 0.99) %>% 
        select(term, estimate, conf.low, conf.high, std.all, statistic, p.value) %>% 
        filter(term %in% c("concept_t2 ~ concept_t1", "d_concept_1 =~ concept_t2", "concept_t1 ~~ d_concept_1", # change parameters
                           "d_concept_1 ~1 ", "concept_t1 ~1 ", "", # means
                           "d_concept_1 ~~ d_concept_1")) # variances

kable(params_lcs_concept_hyp2, digits = 3)
term estimate conf.low conf.high std.all statistic p.value
concept_t2 ~ concept_t1 1.000 1.000 1.000 0.986 NA NA
d_concept_1 =~ concept_t2 1.000 1.000 1.000 0.593 NA NA
d_concept_1 ~1 0.105 0.044 0.166 0.236 4.428 0
concept_t1 ~1 3.031 2.935 3.127 4.101 81.260 0
d_concept_1 ~~ d_concept_1 0.198 0.135 0.260 1.000 8.160 0
concept_t1 ~~ d_concept_1 -0.091 -0.138 -0.044 -0.278 -5.019 0

Self concept clarity improved significantly across time, b = 0.105, beta = 0.12, p = 0.


6.3 Distance between ideal- and current personality over time (H1 in paper)

The distance between ideal- and current personality will decrease in both groups.

We will use two strategies to test this hypothesis. First, we will compute the Fisher z-transformed profile correlation between current- and ideal personality and test whether it increased across assessments. Second, we will test whether the squared difference between current- and ideal personality ratings for each Big Five trait decreased across assessments. We will test mean-level differences in profile correlations and squared differences between baseline and follow up using repeated-measures t-test.

In Study 3, we can only test this in Group 1 where we assessed both current and ideal personality ratings.

6.3.1 Profile similarity

Reshape to wide:

Show the code
# reshape to wide
df_sbsa3_wide_profdiff <- df_sbsa3 %>% 
  filter(group=="Group 1") %>% # not possible in the other groups
  arrange(pid, time) %>% 
  select(pid, time, profile_corr_item_z, profile_corr_facet_z, ends_with("_sqdiff")) %>% 
  pivot_wider(names_from = time,
              names_sep = "_t",
              values_from = -c(pid, time))

Checking assumptions

# item-level profile correlation
shapiro.test(df_sbsa3_wide_profdiff$profile_corr_item_z_t2 - df_sbsa3_wide_profdiff$profile_corr_item_z_t1) 

    Shapiro-Wilk normality test

data:  df_sbsa3_wide_profdiff$profile_corr_item_z_t2 - df_sbsa3_wide_profdiff$profile_corr_item_z_t1
W = 0.81981, p-value = 1.711e-12
lillie.test(df_sbsa3_wide_profdiff$profile_corr_item_z_t2 - df_sbsa3_wide_profdiff$profile_corr_item_z_t1)

    Lilliefors (Kolmogorov-Smirnov) normality test

data:  df_sbsa3_wide_profdiff$profile_corr_item_z_t2 - df_sbsa3_wide_profdiff$profile_corr_item_z_t1
D = 0.11182, p-value = 6.975e-05
# descriptives
psych::describe(df_sbsa3_wide_profdiff$profile_corr_item_z_t1)
   vars   n mean   sd median trimmed  mad   min  max range skew kurtosis   se
X1    1 174 0.45 0.48   0.37    0.42 0.48 -0.62 1.91  2.53 0.56     0.33 0.04
psych::describe(df_sbsa3_wide_profdiff$profile_corr_item_z_t2)
   vars   n mean   sd median trimmed  mad   min  max range skew kurtosis   se
X1    1 155 0.54 0.54   0.47    0.49 0.42 -0.36 3.16  3.52 1.43     4.14 0.04
# facet-level profile correlation
shapiro.test(df_sbsa3_wide_profdiff$profile_corr_facet_z_t2 - df_sbsa3_wide_profdiff$profile_corr_facet_z_t1) 

    Shapiro-Wilk normality test

data:  df_sbsa3_wide_profdiff$profile_corr_facet_z_t2 - df_sbsa3_wide_profdiff$profile_corr_facet_z_t1
W = 0.94078, p-value = 4.647e-06
lillie.test(df_sbsa3_wide_profdiff$profile_corr_facet_z_t2 - df_sbsa3_wide_profdiff$profile_corr_facet_z_t1)

    Lilliefors (Kolmogorov-Smirnov) normality test

data:  df_sbsa3_wide_profdiff$profile_corr_facet_z_t2 - df_sbsa3_wide_profdiff$profile_corr_facet_z_t1
D = 0.074446, p-value = 0.03642
# descriptives
psych::describe(df_sbsa3_wide_profdiff$profile_corr_facet_z_t1)
   vars   n mean   sd median trimmed  mad   min  max range skew kurtosis   se
X1    1 174 0.38 0.66   0.33    0.35 0.65 -0.78 2.36  3.14 0.45    -0.08 0.05
psych::describe(df_sbsa3_wide_profdiff$profile_corr_facet_z_t2)
   vars   n mean   sd median trimmed  mad   min  max range skew kurtosis   se
X1    1 155 0.48 0.69   0.39    0.43 0.61 -0.94 2.68  3.62 0.77     0.83 0.06

Results summary:

t_profile_corr_item <- t.test(df_sbsa3_wide_profdiff$profile_corr_item_z_t2, df_sbsa3_wide_profdiff$profile_corr_item_z_t1, paired = TRUE)
t_profile_corr_facet <- t.test(df_sbsa3_wide_profdiff$profile_corr_facet_z_t2, df_sbsa3_wide_profdiff$profile_corr_facet_z_t1, paired = TRUE)

kable(bind_rows(tidy(t_profile_corr_item), tidy(t_profile_corr_facet)) %>% mutate(outcome = c("Item-level profile corr.", "Facet-level profile corr.")) %>% select(outcome, everything()), digits = 3)
outcome estimate statistic p.value parameter conf.low conf.high method alternative
Item-level profile corr. 0.101 3.217 0.002 153 0.039 0.163 Paired t-test two.sided
Facet-level profile corr. 0.117 3.099 0.002 153 0.042 0.191 Paired t-test two.sided
# effect sizes
d_profile_corr_item <- psych::cohen.d(profile_corr_item_z ~ time, data = df_sbsa3 %>% filter(group=="Group 1"))
d_profile_corr_item$cohen.d
                          lower    effect     upper
profile_corr_item_z -0.03676651 0.1802867 0.3970643
d_profile_corr_facet <- psych::cohen.d(profile_corr_facet_z ~ time, data = df_sbsa3 %>% filter(group=="Group 1"))
d_profile_corr_facet$cohen.d
                           lower    effect     upper
profile_corr_facet_z -0.05927102 0.1576606 0.3743511

Plot

profile_df_plot <- df_sbsa3 %>% 
  filter(group=="Group 1") %>% 
  select(pid, time, profile_corr_item_z, profile_corr_facet_z) %>% 
  pivot_longer(-c(pid, time), names_to = "itemfacet", values_to = "corr") %>% 
  mutate(itemfacet2 = fct_recode(itemfacet, "Item-level" = "profile_corr_item_z", "Facet-level" = "profile_corr_facet_z"),
         itemfacet2 = fct_reorder(itemfacet2, corr, .desc = F))

ggplot(profile_df_plot) + 
  aes(x = as.factor(time), y = corr) + 
  geom_boxplot() + 
  geom_violin(fill = NA) +
  facet_wrap(vars(itemfacet2)) +
  labs(x = "Measurement Occasion", y = "Profile correlation") + # title = "Distance between ideal- and current personality"
  theme_bw()

Significantly higher profile correlations at the second measurement occasion, both for the item-level profile correlation and the facet-level profile correlations.

6.3.2 Individual traits: squared differences

Checking assumptions

# extra
shapiro.test(df_sbsa3_wide_profdiff$extra_sqdiff_t2 - df_sbsa3_wide_profdiff$extra_sqdiff_t1) 

    Shapiro-Wilk normality test

data:  df_sbsa3_wide_profdiff$extra_sqdiff_t2 - df_sbsa3_wide_profdiff$extra_sqdiff_t1
W = 0.88604, p-value = 1.618e-09
lillie.test(df_sbsa3_wide_profdiff$extra_sqdiff_t2 - df_sbsa3_wide_profdiff$extra_sqdiff_t1)

    Lilliefors (Kolmogorov-Smirnov) normality test

data:  df_sbsa3_wide_profdiff$extra_sqdiff_t2 - df_sbsa3_wide_profdiff$extra_sqdiff_t1
D = 0.13641, p-value = 2.122e-07
# descriptives
psych::describe(df_sbsa3_wide_profdiff$extra_sqdiff_t1)
   vars   n mean   sd median trimmed  mad min   max range skew kurtosis   se
X1    1 174 1.75 2.01   1.17     1.4 1.37   0 14.06 14.06 2.48     9.26 0.15
psych::describe(df_sbsa3_wide_profdiff$extra_sqdiff_t2)
   vars   n mean   sd median trimmed  mad min   max range skew kurtosis   se
X1    1 155 1.37 1.61   0.84    1.09 1.15   0 10.03 10.03  2.2     6.59 0.13
# agree
shapiro.test(df_sbsa3_wide_profdiff$agree_sqdiff_t2 - df_sbsa3_wide_profdiff$agree_sqdiff_t1) 

    Shapiro-Wilk normality test

data:  df_sbsa3_wide_profdiff$agree_sqdiff_t2 - df_sbsa3_wide_profdiff$agree_sqdiff_t1
W = 0.8918, p-value = 3.291e-09
lillie.test(df_sbsa3_wide_profdiff$agree_sqdiff_t2 - df_sbsa3_wide_profdiff$agree_sqdiff_t1)

    Lilliefors (Kolmogorov-Smirnov) normality test

data:  df_sbsa3_wide_profdiff$agree_sqdiff_t2 - df_sbsa3_wide_profdiff$agree_sqdiff_t1
D = 0.16743, p-value = 2.129e-11
# descriptives
psych::describe(df_sbsa3_wide_profdiff$agree_sqdiff_t1)
   vars   n mean  sd median trimmed mad min  max range skew kurtosis   se
X1    1 174  0.4 0.6   0.14    0.26 0.2   0 3.67  3.67 2.51     7.37 0.05
psych::describe(df_sbsa3_wide_profdiff$agree_sqdiff_t2)
   vars   n mean   sd median trimmed  mad min  max range skew kurtosis   se
X1    1 155 0.29 0.45   0.11    0.19 0.15   0 2.78  2.78 2.86     9.92 0.04
# consc
shapiro.test(df_sbsa3_wide_profdiff$consc_sqdiff_t2 - df_sbsa3_wide_profdiff$consc_sqdiff_t1) 

    Shapiro-Wilk normality test

data:  df_sbsa3_wide_profdiff$consc_sqdiff_t2 - df_sbsa3_wide_profdiff$consc_sqdiff_t1
W = 0.85898, p-value = 7.631e-11
lillie.test(df_sbsa3_wide_profdiff$consc_sqdiff_t2 - df_sbsa3_wide_profdiff$consc_sqdiff_t1)

    Lilliefors (Kolmogorov-Smirnov) normality test

data:  df_sbsa3_wide_profdiff$consc_sqdiff_t2 - df_sbsa3_wide_profdiff$consc_sqdiff_t1
D = 0.17145, p-value = 5.525e-12
# descriptives
psych::describe(df_sbsa3_wide_profdiff$consc_sqdiff_t1)
   vars   n mean   sd median trimmed  mad min   max range skew kurtosis   se
X1    1 174  1.8 2.35   1.09     1.3 1.36   0 13.44 13.44 2.23     5.46 0.18
psych::describe(df_sbsa3_wide_profdiff$consc_sqdiff_t2)
   vars   n mean   sd median trimmed  mad min   max range skew kurtosis   se
X1    1 155 1.54 2.11   0.84    1.08 1.08   0 12.25 12.25 2.49        7 0.17
# neuro
shapiro.test(df_sbsa3_wide_profdiff$neuro_sqdiff_t2 - df_sbsa3_wide_profdiff$neuro_sqdiff_t1) 

    Shapiro-Wilk normality test

data:  df_sbsa3_wide_profdiff$neuro_sqdiff_t2 - df_sbsa3_wide_profdiff$neuro_sqdiff_t1
W = 0.97928, p-value = 0.02024
lillie.test(df_sbsa3_wide_profdiff$neuro_sqdiff_t2 - df_sbsa3_wide_profdiff$neuro_sqdiff_t1)

    Lilliefors (Kolmogorov-Smirnov) normality test

data:  df_sbsa3_wide_profdiff$neuro_sqdiff_t2 - df_sbsa3_wide_profdiff$neuro_sqdiff_t1
D = 0.10473, p-value = 0.0002893
# descriptives
psych::describe(df_sbsa3_wide_profdiff$neuro_sqdiff_t1)
   vars   n mean   sd median trimmed  mad min   max range skew kurtosis   se
X1    1 174 2.78 2.95   1.56    2.32 2.06   0 12.84 12.84 1.25     0.96 0.22
psych::describe(df_sbsa3_wide_profdiff$neuro_sqdiff_t2)
   vars   n mean   sd median trimmed  mad min   max range skew kurtosis   se
X1    1 155 2.52 2.72   1.78    2.04 2.35   0 14.06 14.06 1.58      2.5 0.22
# openn
shapiro.test(df_sbsa3_wide_profdiff$openn_sqdiff_t2 - df_sbsa3_wide_profdiff$openn_sqdiff_t1) 

    Shapiro-Wilk normality test

data:  df_sbsa3_wide_profdiff$openn_sqdiff_t2 - df_sbsa3_wide_profdiff$openn_sqdiff_t1
W = 0.79025, p-value = 1.397e-13
lillie.test(df_sbsa3_wide_profdiff$openn_sqdiff_t2 - df_sbsa3_wide_profdiff$openn_sqdiff_t1)

    Lilliefors (Kolmogorov-Smirnov) normality test

data:  df_sbsa3_wide_profdiff$openn_sqdiff_t2 - df_sbsa3_wide_profdiff$openn_sqdiff_t1
D = 0.18609, p-value = 3.017e-14
# descriptives
psych::describe(df_sbsa3_wide_profdiff$openn_sqdiff_t1)
   vars   n mean   sd median trimmed  mad min  max range skew kurtosis   se
X1    1 174  0.4 0.74   0.11    0.24 0.15   0 6.67  6.67 4.41    29.21 0.06
psych::describe(df_sbsa3_wide_profdiff$openn_sqdiff_t2)
   vars   n mean  sd median trimmed  mad min  max range skew kurtosis   se
X1    1 155 0.34 0.5   0.17    0.23 0.25   0 3.06  3.06 2.81     9.45 0.04

Results summary:

mod_traits_sqdiff <- df_sbsa3_wide_profdiff %>% 
  select(-starts_with("profile")) %>% 
  pivot_longer(-c(pid), 
               names_to = c("test", "time"), values_to = "score", 
               names_pattern = "(.*)_(t1|t2)") %>% 
  filter(!is.na(score)) %>% 
  group_by(pid, test) %>% 
  mutate(assessments = n()) %>% 
  ungroup() %>% 
  filter(assessments==2) %>% 
  select(-assessments) %>% 
  group_nest(test) %>% 
  mutate(t_tests = map(data, ~t.test(score ~ relevel(as.factor(time), "t2"), # need to relevel this to get T2-T1 diff (not sure why)
                                     data = .x, paired = TRUE))) %>% 
  pull(t_tests) %>% 
  purrr::set_names(sort(names(b5_vars))) # ordered alphabetically

mod_traits_unlist <- mod_traits_sqdiff[[1]] %>% tidy()
for (i in 2:length(mod_traits_sqdiff)) {
  mod_traits_unlist <- bind_rows(mod_traits_unlist, mod_traits_sqdiff[[i]] %>% tidy())
}

kable(mod_traits_unlist %>% mutate(outcome = sort(names(b5_vars))) %>% 
        select(outcome, everything()) %>% arrange(factor(outcome, levels = (names(b5_vars)))), digits = 3) # order by BFI traits
outcome estimate statistic p.value parameter conf.low conf.high method alternative
extraversion -0.457 -4.400 0.000 153 -0.662 -0.252 Paired t-test two.sided
agreeableness -0.102 -2.543 0.012 153 -0.181 -0.023 Paired t-test two.sided
conscientiousness -0.352 -3.106 0.002 153 -0.577 -0.128 Paired t-test two.sided
neuroticism -0.327 -2.214 0.028 153 -0.618 -0.035 Paired t-test two.sided
openness -0.076 -1.632 0.105 153 -0.168 0.016 Paired t-test two.sided
sociability -0.769 -4.161 0.000 153 -1.135 -0.404 Paired t-test two.sided
assertiveness -0.358 -2.581 0.011 153 -0.631 -0.084 Paired t-test two.sided
energy -0.279 -1.739 0.084 153 -0.596 0.038 Paired t-test two.sided
compassion 0.070 1.017 0.311 153 -0.066 0.205 Paired t-test two.sided
respectfulness -0.047 -0.845 0.399 153 -0.159 0.064 Paired t-test two.sided
trust -0.384 -2.876 0.005 153 -0.647 -0.120 Paired t-test two.sided
organization -0.441 -2.412 0.017 153 -0.802 -0.080 Paired t-test two.sided
productiveness -0.673 -3.139 0.002 153 -1.096 -0.249 Paired t-test two.sided
responsibility -0.211 -2.119 0.036 153 -0.408 -0.014 Paired t-test two.sided
anxiety -0.390 -1.524 0.130 153 -0.895 0.116 Paired t-test two.sided
depression -0.588 -2.776 0.006 153 -1.006 -0.170 Paired t-test two.sided
volatility -0.181 -0.910 0.364 153 -0.574 0.212 Paired t-test two.sided
curiosity -0.103 -1.400 0.164 153 -0.249 0.042 Paired t-test two.sided
aesthetic -0.041 -0.450 0.654 153 -0.219 0.138 Paired t-test two.sided
imagination -0.146 -1.281 0.202 153 -0.370 0.079 Paired t-test two.sided
# effect sizes
cohend_cust = function(outcome, df) {
  form = as.formula(paste0(outcome, " ~ time"))
  psych::cohen.d(form, data = df)
}

d_trait <- cohend_cust(paste0(str_trunc(names(b5_vars)[1], 5, ellipsis = ""), "_sqdiff"), 
                       df_sbsa3 %>% filter(group=="Group 1"))
d_traits_unlist <- as_tibble(d_trait$cohen.d)
for (i in 2:length(names(b5_vars))) {
  d_trait <- cohend_cust(paste0(str_trunc(names(b5_vars)[i], 5, ellipsis = ""), "_sqdiff"), 
                         df_sbsa3 %>% filter(group=="Group 1"))
  d_traits_unlist <- bind_rows(d_traits_unlist, as_tibble(d_trait$cohen.d))
}
kable(d_traits_unlist %>% mutate(outcome = names(b5_vars)) %>% select(outcome, everything()), digits = 3)
outcome lower effect upper
extraversion -0.424 -0.207 0.010
agreeableness -0.427 -0.210 0.007
conscientiousness -0.334 -0.118 0.099
neuroticism -0.307 -0.090 0.127
openness -0.316 -0.099 0.117
sociability -0.465 -0.248 -0.030
assertiveness -0.341 -0.125 0.092
energy -0.307 -0.090 0.126
compassion -0.118 0.099 0.315
respectfulness -0.292 -0.076 0.141
trust -0.461 -0.244 -0.026
organization -0.328 -0.112 0.105
productiveness -0.394 -0.178 0.040
responsibility -0.291 -0.074 0.142
anxiety -0.289 -0.073 0.144
depression -0.350 -0.133 0.084
volatility -0.264 -0.048 0.169
curiosity -0.293 -0.077 0.140
aesthetic -0.262 -0.046 0.171
imagination -0.280 -0.063 0.153

[Note: These CIs for d are probably “wrong” -> not appropriate for paired t-test]

Significantly lower squared mean-level differences at the second measurement occasion for extraversion and conscientiousness, but not for the rest of the Big Five traits and their 15 BFI-2 facets (at p < .01). Except for depression where the squared mean-level differences also decline significantly.

Plot (traits/facets sorted by average size of squared differences)

sqdiff_df_plot <- df_sbsa3 %>% 
  filter(group=="Group 1") %>% 
  select(pid, time, all_of(paste0(str_trunc(names(b5_vars), 5, ellipsis = ""), "_sqdiff"))) %>% 
  pivot_longer(-c(pid, time), names_to = "trait", values_to = "sqdiff") %>% 
  group_by(trait) %>% 
  mutate(cur_group_id()) %>% 
  ungroup() %>% 
  mutate(trait2 = fct_recode(trait, !!! setNames(paste0(str_trunc(names(b5_vars), 5, ellipsis = ""), "_sqdiff"),
                                                 str_to_title(names(b5_vars)))),
         trait2 = fct_reorder(trait2, sqdiff, .desc = T))

plot_sqdiff <- ggplot(sqdiff_df_plot) + 
  aes(x = as.factor(time), y = sqdiff) + 
  geom_boxplot() + 
  geom_violin(fill = NA) +
  ggforce::facet_wrap_paginate(~trait2, ncol=3, nrow=2) +
  labs(x = "Measurement Occasion", y = "Squared difference") + 
  theme_bw()

plot_sqdiff + ggforce::facet_wrap_paginate(~trait2, ncol=4, nrow=1, page = 1)

plot_sqdiff + ggforce::facet_wrap_paginate(~trait2, ncol=4, nrow=1, page = 2)

plot_sqdiff + ggforce::facet_wrap_paginate(~trait2, ncol=4, nrow=1, page = 3)

plot_sqdiff + ggforce::facet_wrap_paginate(~trait2, ncol=4, nrow=1, page = 4)

plot_sqdiff + ggforce::facet_wrap_paginate(~trait2, ncol=4, nrow=1, page = 5)

Distributions only look different for some of the facets (mostly E and C plus .


6.4 Latent change in Big Five

Amount of change on the latent level collapsed across all groups.

We will test the mean-level difference between baseline and follow up using a latent change model.

6.4.1 Extraversion

Fit model:

Show the code
# Fit the multiple indicator Univariate Latent Change Score model
mi_lcs_extra_hyp2 <- '
extra_t1 =~ 1*extra_curr_par1_t1 + lamb2*extra_curr_par2_t1 + lamb3*extra_curr_par3_t1 # Measurement model T1
extra_t2 =~ 1*extra_curr_par1_t2 + lamb2*extra_curr_par2_t2 + lamb3*extra_curr_par3_t2 # Measurement model T2 with the equality constrained factor loadings

extra_t2 ~ 1*extra_t1     # This parameter regresses extra_t2 perfectly on extra_t1
d_extra_1 =~ 1*extra_t2   # This defines the latent change score factor as measured perfectly by scores on extra_t2
extra_t2 ~ 0*1           # This line constrains the intercept of extra_t2 to 0
extra_t2 ~~ 0*extra_t2    # This fixes the variance of extra_t2 to 0

d_extra_1 ~ 1           # This estimates the intercept of the change score 
extra_t1 ~ 1            # This estimates the intercept of extra_t1 
d_extra_1 ~~ d_extra_1   # This estimates the variance of the change scores 
extra_t1 ~~ extra_t1     # This estimates the variance of the extra_t1 
d_extra_1 ~~ extra_t1     # This estimates the self-feedback parameter, as a covariance! -> therefore, the interpretation of the change score remains unconditional

extra_curr_par1_t1 ~~ extra_curr_par1_t2   # This allows residual covariance on indicator X1 across T1 and T2
extra_curr_par2_t1 ~~ extra_curr_par2_t2   # This allows residual covariance on indicator X2 across T1 and T2
extra_curr_par3_t1 ~~ extra_curr_par3_t2   # This allows residual covariance on indicator X3 across T1 and T2

extra_curr_par1_t1 ~~ res1*extra_curr_par1_t1   # This allows residual variance on indicator X1 at T1 
extra_curr_par2_t1 ~~ res2*extra_curr_par2_t1   # This allows residual variance on indicator X2 at T1
extra_curr_par3_t1 ~~ res3*extra_curr_par3_t1   # This allows residual variance on indicator X3 at T1

extra_curr_par1_t2 ~~ res1*extra_curr_par1_t2  # This allows residual variance on indicator X1 at T2 
extra_curr_par2_t2 ~~ res2*extra_curr_par2_t2  # This allows residual variance on indicator X2 at T2 
extra_curr_par3_t2 ~~ res3*extra_curr_par3_t2  # This allows residual variance on indicator X3 at T2

extra_curr_par1_t1 ~ 0*1      # This constrains the intercept of X1 to 0 at T1
extra_curr_par2_t1 ~ m2*1     # This estimates the intercept of X2 at T1
extra_curr_par3_t1 ~ m3*1     # This estimates the intercept of X3 at T1

extra_curr_par1_t2 ~ 0*1      # This constrains the intercept of X1 to 0 at T2
extra_curr_par2_t2 ~ m2*1     # This estimates the intercept of X2 at T2
extra_curr_par3_t2 ~ m3*1     # This estimates the intercept of X3 at T2
'
fit_mi_lcs_extra_hyp2 <- lavaan(mi_lcs_extra_hyp2, data=df_sbsa3_wide_pers, estimator='mlr', fixed.x=FALSE, missing='fiml')
summary(fit_mi_lcs_extra_hyp2, fit.measures=TRUE, standardized=TRUE, rsquare=F)

Results summary:

# model fit
kable(broom::glance(fit_mi_lcs_extra_hyp2) %>% 
        select(nobs, npar, AIC, BIC, cfi, rmsea, srmr), digits = 3)
nobs npar AIC BIC cfi rmsea srmr
530 15 4920.539 4984.632 1 0 0.017
# parameters of interest
params_lcs_extra_hyp2 <- broom::tidy(fit_mi_lcs_extra_hyp2, conf.int = TRUE, conf.level = 0.99) %>% 
        select(term, estimate, conf.low, conf.high, std.all, statistic, p.value) %>% 
        filter(term %in% c("extra_t2 ~ extra_t1", "d_extra_1 =~ extra_t2", "extra_t1 ~~ d_extra_1", # change parameters
                           "d_extra_1 ~1 ", "extra_t1 ~1 ", "", # means
                           "d_extra_1 ~~ d_extra_1")) # variances
kable(params_lcs_extra_hyp2, digits = 3)
term estimate conf.low conf.high std.all statistic p.value
extra_t2 ~ extra_t1 1.000 1.000 1.000 0.989 NA NA
d_extra_1 =~ extra_t2 1.000 1.000 1.000 0.329 NA NA
d_extra_1 ~1 0.043 0.000 0.085 0.180 2.562 0.01
extra_t1 ~1 3.078 2.987 3.170 4.314 86.648 0.00
d_extra_1 ~~ d_extra_1 0.056 0.026 0.086 1.000 4.835 0.00
extra_t1 ~~ d_extra_1 -0.023 -0.059 0.014 -0.134 -1.597 0.11

Significantly higher extraversion at the post test, b = 0.043, beta = 0.056, p = 0.01.

6.4.2 Agreeableness

Fit model:

Show the code
# Fit the multiple indicator Univariate Latent Change Score model
mi_lcs_agree_hyp2 <- '
agree_t1 =~ 1*agree_curr_par1_t1 + lamb2*agree_curr_par2_t1 + lamb3*agree_curr_par3_t1 # Measurement model T1
agree_t2 =~ 1*agree_curr_par1_t2 + lamb2*agree_curr_par2_t2 + lamb3*agree_curr_par3_t2 # Measurement model T2 with the equality constrained factor loadings

agree_t2 ~ 1*agree_t1     # This parameter regresses agree_t2 perfectly on agree_t1
d_agree_1 =~ 1*agree_t2   # This defines the latent change score factor as measured perfectly by scores on agree_t2
agree_t2 ~ 0*1           # This line constrains the intercept of agree_t2 to 0
agree_t2 ~~ 0*agree_t2    # This fixes the variance of agree_t2 to 0

d_agree_1 ~ 1           # This estimates the intercept of the change score 
agree_t1 ~ 1            # This estimates the intercept of agree_t1 
d_agree_1 ~~ d_agree_1   # This estimates the variance of the change scores 
agree_t1 ~~ agree_t1     # This estimates the variance of the agree_t1 
d_agree_1 ~~ agree_t1     # This estimates the self-feedback parameter, as a covariance! -> therefore, the interpretation of the change score remains unconditional

agree_curr_par1_t1 ~~ agree_curr_par1_t2   # This allows residual covariance on indicator X1 across T1 and T2
agree_curr_par2_t1 ~~ agree_curr_par2_t2   # This allows residual covariance on indicator X2 across T1 and T2
agree_curr_par3_t1 ~~ agree_curr_par3_t2   # This allows residual covariance on indicator X3 across T1 and T2

agree_curr_par1_t1 ~~ res1*agree_curr_par1_t1   # This allows residual variance on indicator X1 at T1 
agree_curr_par2_t1 ~~ res2*agree_curr_par2_t1   # This allows residual variance on indicator X2 at T1
agree_curr_par3_t1 ~~ res3*agree_curr_par3_t1   # This allows residual variance on indicator X3 at T1

agree_curr_par1_t2 ~~ res1*agree_curr_par1_t2  # This allows residual variance on indicator X1 at T2 
agree_curr_par2_t2 ~~ res2*agree_curr_par2_t2  # This allows residual variance on indicator X2 at T2 
agree_curr_par3_t2 ~~ res3*agree_curr_par3_t2  # This allows residual variance on indicator X3 at T2

agree_curr_par1_t1 ~ 0*1      # This constrains the intercept of X1 to 0 at T1
agree_curr_par2_t1 ~ m2*1     # This estimates the intercept of X2 at T1
agree_curr_par3_t1 ~ m3*1     # This estimates the intercept of X3 at T1

agree_curr_par1_t2 ~ 0*1      # This constrains the intercept of X1 to 0 at T2
agree_curr_par2_t2 ~ m2*1     # This estimates the intercept of X2 at T2
agree_curr_par3_t2 ~ m3*1     # This estimates the intercept of X3 at T2
'
fit_mi_lcs_agree_hyp2 <- lavaan(mi_lcs_agree_hyp2, data=df_sbsa3_wide_pers, estimator='mlr', fixed.x=FALSE, missing='fiml')
summary(fit_mi_lcs_agree_hyp2, fit.measures=TRUE, standardized=TRUE, rsquare=F)

Results summary:

# model fit
kable(broom::glance(fit_mi_lcs_agree_hyp2) %>% 
        select(nobs, npar, AIC, BIC, cfi, rmsea, srmr), digits = 3)
nobs npar AIC BIC cfi rmsea srmr
530 15 4787.496 4851.589 0.987 0.057 0.03
# parameters of interest
params_lcs_agree_hyp2 <- broom::tidy(fit_mi_lcs_agree_hyp2, conf.int = TRUE, conf.level = 0.99) %>% 
        select(term, estimate, conf.low, conf.high, std.all, statistic, p.value) %>% 
        filter(term %in% c("agree_t2 ~ agree_t1", "d_agree_1 =~ agree_t2", "agree_t1 ~~ d_agree_1", # change parameters
                           "d_agree_1 ~1 ", "agree_t1 ~1 ", "", # means
                           "d_agree_1 ~~ d_agree_1")) # variances
kable(params_lcs_agree_hyp2, digits = 3)
term estimate conf.low conf.high std.all statistic p.value
agree_t2 ~ agree_t1 1.000 1.000 1.000 0.999 NA NA
d_agree_1 =~ agree_t2 1.000 1.000 1.000 0.502 NA NA
d_agree_1 ~1 0.026 -0.014 0.066 0.110 1.687 0.092
agree_t1 ~1 3.732 3.657 3.807 7.943 127.758 0.000
d_agree_1 ~~ d_agree_1 0.056 0.018 0.094 1.000 3.789 0.000
agree_t1 ~~ d_agree_1 -0.028 -0.052 -0.003 -0.249 -2.937 0.003

No significant changes in agreeableness from pre to post test, b = 0.026, beta = 0.044, p = 0.092.

6.4.3 Conscientiousness

Fit model:

Show the code
# Fit the multiple indicator Univariate Latent Change Score model
mi_lcs_consc_hyp2 <- '
consc_t1 =~ 1*consc_curr_par1_t1 + lamb2*consc_curr_par2_t1 + lamb3*consc_curr_par3_t1 # Measurement model T1
consc_t2 =~ 1*consc_curr_par1_t2 + lamb2*consc_curr_par2_t2 + lamb3*consc_curr_par3_t2 # Measurement model T2 with the equality constrained factor loadings

consc_t2 ~ 1*consc_t1     # This parameter regresses consc_t2 perfectly on consc_t1
d_consc_1 =~ 1*consc_t2   # This defines the latent change score factor as measured perfectly by scores on consc_t2
consc_t2 ~ 0*1           # This line constrains the intercept of consc_t2 to 0
consc_t2 ~~ 0*consc_t2    # This fixes the variance of consc_t2 to 0

d_consc_1 ~ 1           # This estimates the intercept of the change score 
consc_t1 ~ 1            # This estimates the intercept of consc_t1 
d_consc_1 ~~ d_consc_1   # This estimates the variance of the change scores 
consc_t1 ~~ consc_t1     # This estimates the variance of the consc_t1 
d_consc_1 ~~ consc_t1     # This estimates the self-feedback parameter, as a covariance! -> therefore, the interpretation of the change score remains unconditional

consc_curr_par1_t1 ~~ consc_curr_par1_t2   # This allows residual covariance on indicator X1 across T1 and T2
consc_curr_par2_t1 ~~ consc_curr_par2_t2   # This allows residual covariance on indicator X2 across T1 and T2
consc_curr_par3_t1 ~~ consc_curr_par3_t2   # This allows residual covariance on indicator X3 across T1 and T2

consc_curr_par1_t1 ~~ res1*consc_curr_par1_t1   # This allows residual variance on indicator X1 at T1 
consc_curr_par2_t1 ~~ res2*consc_curr_par2_t1   # This allows residual variance on indicator X2 at T1
consc_curr_par3_t1 ~~ res3*consc_curr_par3_t1   # This allows residual variance on indicator X3 at T1

consc_curr_par1_t2 ~~ res1*consc_curr_par1_t2  # This allows residual variance on indicator X1 at T2 
consc_curr_par2_t2 ~~ res2*consc_curr_par2_t2  # This allows residual variance on indicator X2 at T2 
consc_curr_par3_t2 ~~ res3*consc_curr_par3_t2  # This allows residual variance on indicator X3 at T2

consc_curr_par1_t1 ~ 0*1      # This constrains the intercept of X1 to 0 at T1
consc_curr_par2_t1 ~ m2*1     # This estimates the intercept of X2 at T1
consc_curr_par3_t1 ~ m3*1     # This estimates the intercept of X3 at T1

consc_curr_par1_t2 ~ 0*1      # This constrains the intercept of X1 to 0 at T2
consc_curr_par2_t2 ~ m2*1     # This estimates the intercept of X2 at T2
consc_curr_par3_t2 ~ m3*1     # This estimates the intercept of X3 at T2
'
fit_mi_lcs_consc_hyp2 <- lavaan(mi_lcs_consc_hyp2, data=df_sbsa3_wide_pers, estimator='mlr', fixed.x=FALSE, missing='fiml')
summary(fit_mi_lcs_consc_hyp2, fit.measures=TRUE, standardized=TRUE, rsquare=F)

Results summary:

# model fit
kable(broom::glance(fit_mi_lcs_consc_hyp2) %>% 
        select(nobs, npar, AIC, BIC, cfi, rmsea, srmr), digits = 3)
nobs npar AIC BIC cfi rmsea srmr
530 15 4835.468 4899.561 0.998 0.024 0.013
# parameters of interest
params_lcs_consc_hyp2 <- broom::tidy(fit_mi_lcs_consc_hyp2, conf.int = TRUE, conf.level = 0.99) %>% 
        select(term, estimate, conf.low, conf.high, std.all, statistic, p.value) %>% 
        filter(term %in% c("consc_t2 ~ consc_t1", "d_consc_1 =~ consc_t2", "consc_t1 ~~ d_consc_1", # change parameters
                           "d_consc_1 ~1 ", "consc_t1 ~1 ", "", # means
                           "d_consc_1 ~~ d_consc_1")) # variances
kable(params_lcs_consc_hyp2, digits = 3)
term estimate conf.low conf.high std.all statistic p.value
consc_t2 ~ consc_t1 1.000 1.000 1.000 1.029 NA NA
d_consc_1 =~ consc_t2 1.000 1.000 1.000 0.386 NA NA
d_consc_1 ~1 0.068 0.023 0.112 0.255 3.899 0
consc_t1 ~1 3.161 3.070 3.252 4.467 89.194 0
d_consc_1 ~~ d_consc_1 0.070 0.038 0.103 1.000 5.535 0
consc_t1 ~~ d_consc_1 -0.049 -0.085 -0.014 -0.262 -3.577 0

Significantly higher conscientiousness at the post test, b = 0.068, beta = 0.09, p = 0.

6.4.4 Neuroticism

Fit model:

Show the code
# Fit the multiple indicator Univariate Latent Change Score model
mi_lcs_neuro_hyp2 <- '
neuro_t1 =~ 1*neuro_curr_par1_t1 + lamb2*neuro_curr_par2_t1 + lamb3*neuro_curr_par3_t1 # Measurement model T1
neuro_t2 =~ 1*neuro_curr_par1_t2 + lamb2*neuro_curr_par2_t2 + lamb3*neuro_curr_par3_t2 # Measurement model T2 with the equality constrained factor loadings

neuro_t2 ~ 1*neuro_t1     # This parameter regresses neuro_t2 perfectly on neuro_t1
d_neuro_1 =~ 1*neuro_t2   # This defines the latent change score factor as measured perfectly by scores on neuro_t2
neuro_t2 ~ 0*1           # This line constrains the intercept of neuro_t2 to 0
neuro_t2 ~~ 0*neuro_t2    # This fixes the variance of neuro_t2 to 0

d_neuro_1 ~ 1           # This estimates the intercept of the change score 
neuro_t1 ~ 1            # This estimates the intercept of neuro_t1 
d_neuro_1 ~~ d_neuro_1   # This estimates the variance of the change scores 
neuro_t1 ~~ neuro_t1     # This estimates the variance of the neuro_t1 
d_neuro_1 ~~ neuro_t1     # This estimates the self-feedback parameter, as a covariance! -> therefore, the interpretation of the change score remains unconditional

neuro_curr_par1_t1 ~~ neuro_curr_par1_t2   # This allows residual covariance on indicator X1 across T1 and T2
neuro_curr_par2_t1 ~~ neuro_curr_par2_t2   # This allows residual covariance on indicator X2 across T1 and T2
neuro_curr_par3_t1 ~~ neuro_curr_par3_t2   # This allows residual covariance on indicator X3 across T1 and T2

neuro_curr_par1_t1 ~~ res1*neuro_curr_par1_t1   # This allows residual variance on indicator X1 at T1 
neuro_curr_par2_t1 ~~ res2*neuro_curr_par2_t1   # This allows residual variance on indicator X2 at T1
neuro_curr_par3_t1 ~~ res3*neuro_curr_par3_t1   # This allows residual variance on indicator X3 at T1

neuro_curr_par1_t2 ~~ res1*neuro_curr_par1_t2  # This allows residual variance on indicator X1 at T2 
neuro_curr_par2_t2 ~~ res2*neuro_curr_par2_t2  # This allows residual variance on indicator X2 at T2 
neuro_curr_par3_t2 ~~ res3*neuro_curr_par3_t2  # This allows residual variance on indicator X3 at T2

neuro_curr_par1_t1 ~ 0*1      # This constrains the intercept of X1 to 0 at T1
neuro_curr_par2_t1 ~ m2*1     # This estimates the intercept of X2 at T1
neuro_curr_par3_t1 ~ m3*1     # This estimates the intercept of X3 at T1

neuro_curr_par1_t2 ~ 0*1      # This constrains the intercept of X1 to 0 at T2
neuro_curr_par2_t2 ~ m2*1     # This estimates the intercept of X2 at T2
neuro_curr_par3_t2 ~ m3*1     # This estimates the intercept of X3 at T2
'
fit_mi_lcs_neuro_hyp2 <- lavaan(mi_lcs_neuro_hyp2, data=df_sbsa3_wide_pers, estimator='mlr', fixed.x=FALSE, missing='fiml')
summary(fit_mi_lcs_neuro_hyp2, fit.measures=TRUE, standardized=TRUE, rsquare=F)

Results summary:

# model fit
kable(broom::glance(fit_mi_lcs_neuro_hyp2) %>% 
        select(nobs, npar, AIC, BIC, cfi, rmsea, srmr), digits = 3)
nobs npar AIC BIC cfi rmsea srmr
530 15 4921.769 4985.862 0.999 0.026 0.015
# parameters of interest
params_lcs_neuro_hyp2 <- broom::tidy(fit_mi_lcs_neuro_hyp2, conf.int = TRUE, conf.level = 0.99) %>% 
        select(term, estimate, conf.low, conf.high, std.all, statistic, p.value) %>% 
        filter(term %in% c("neuro_t2 ~ neuro_t1", "d_neuro_1 =~ neuro_t2", "neuro_t1 ~~ d_neuro_1", # change parameters
                           "d_neuro_1 ~1 ", "neuro_t1 ~1 ", "", # means
                           "d_neuro_1 ~~ d_neuro_1")) # variances
kable(params_lcs_neuro_hyp2, digits = 3)
term estimate conf.low conf.high std.all statistic p.value
neuro_t2 ~ neuro_t1 1.000 1.000 1.000 1.018 NA NA
d_neuro_1 =~ neuro_t2 1.000 1.000 1.000 0.400 NA NA
d_neuro_1 ~1 -0.053 -0.101 -0.005 -0.167 -2.821 0.005
neuro_t1 ~1 3.130 3.029 3.231 3.889 79.932 0.000
d_neuro_1 ~~ d_neuro_1 0.100 0.062 0.139 1.000 6.698 0.000
neuro_t1 ~~ d_neuro_1 -0.061 -0.102 -0.021 -0.241 -3.887 0.000

Significantly lower neuroticism at the post test, b = -0.053, beta = -0.059, p = 0.005.

6.4.5 Openness

Fit model:

Show the code
# Fit the multiple indicator Univariate Latent Change Score model
mi_lcs_openn_hyp2 <- '
openn_t1 =~ 1*openn_curr_par1_t1 + lamb2*openn_curr_par2_t1 + lamb3*openn_curr_par3_t1 # Measurement model T1
openn_t2 =~ 1*openn_curr_par1_t2 + lamb2*openn_curr_par2_t2 + lamb3*openn_curr_par3_t2 # Measurement model T2 with the equality constrained factor loadings

openn_t2 ~ 1*openn_t1     # This parameter regresses openn_t2 perfectly on openn_t1
d_openn_1 =~ 1*openn_t2   # This defines the latent change score factor as measured perfectly by scores on openn_t2
openn_t2 ~ 0*1           # This line constrains the intercept of openn_t2 to 0
openn_t2 ~~ 0*openn_t2    # This fixes the variance of openn_t2 to 0

d_openn_1 ~ 1           # This estimates the intercept of the change score 
openn_t1 ~ 1            # This estimates the intercept of openn_t1 
d_openn_1 ~~ d_openn_1   # This estimates the variance of the change scores 
openn_t1 ~~ openn_t1     # This estimates the variance of the openn_t1 
d_openn_1 ~~ openn_t1     # This estimates the self-feedback parameter, as a covariance! -> therefore, the interpretation of the change score remains unconditional

openn_curr_par1_t1 ~~ openn_curr_par1_t2   # This allows residual covariance on indicator X1 across T1 and T2
openn_curr_par2_t1 ~~ openn_curr_par2_t2   # This allows residual covariance on indicator X2 across T1 and T2
openn_curr_par3_t1 ~~ openn_curr_par3_t2   # This allows residual covariance on indicator X3 across T1 and T2

openn_curr_par1_t1 ~~ res1*openn_curr_par1_t1   # This allows residual variance on indicator X1 at T1 
openn_curr_par2_t1 ~~ res2*openn_curr_par2_t1   # This allows residual variance on indicator X2 at T1
openn_curr_par3_t1 ~~ res3*openn_curr_par3_t1   # This allows residual variance on indicator X3 at T1

openn_curr_par1_t2 ~~ res1*openn_curr_par1_t2  # This allows residual variance on indicator X1 at T2 
openn_curr_par2_t2 ~~ res2*openn_curr_par2_t2  # This allows residual variance on indicator X2 at T2 
openn_curr_par3_t2 ~~ res3*openn_curr_par3_t2  # This allows residual variance on indicator X3 at T2

openn_curr_par1_t1 ~ 0*1      # This constrains the intercept of X1 to 0 at T1
openn_curr_par2_t1 ~ m2*1     # This estimates the intercept of X2 at T1
openn_curr_par3_t1 ~ m3*1     # This estimates the intercept of X3 at T1

openn_curr_par1_t2 ~ 0*1      # This constrains the intercept of X1 to 0 at T2
openn_curr_par2_t2 ~ m2*1     # This estimates the intercept of X2 at T2
openn_curr_par3_t2 ~ m3*1     # This estimates the intercept of X3 at T2
'
fit_mi_lcs_openn_hyp2 <- lavaan(mi_lcs_openn_hyp2, data=df_sbsa3_wide_pers, estimator='mlr', fixed.x=FALSE, missing='fiml')
summary(fit_mi_lcs_openn_hyp2, fit.measures=TRUE, standardized=TRUE, rsquare=F)

Results summary:

# model fit
kable(broom::glance(fit_mi_lcs_openn_hyp2) %>% 
        select(nobs, npar, AIC, BIC, cfi, rmsea, srmr), digits = 3)
nobs npar AIC BIC cfi rmsea srmr
530 15 4997.663 5061.757 0.999 0.017 0.014
# parameters of interest
params_lcs_openn_hyp2 <- broom::tidy(fit_mi_lcs_openn_hyp2, conf.int = TRUE, conf.level = 0.99) %>% 
        select(term, estimate, conf.low, conf.high, std.all, statistic, p.value) %>% 
        filter(term %in% c("openn_t2 ~ openn_t1", "d_openn_1 =~ openn_t2", "openn_t1 ~~ d_openn_1", # change parameters
                           "d_openn_1 ~1 ", "openn_t1 ~1 ", "", # means
                           "d_openn_1 ~~ d_openn_1")) # variances
kable(params_lcs_openn_hyp2, digits = 3)
term estimate conf.low conf.high std.all statistic p.value
openn_t2 ~ openn_t1 1.000 1.000 1.000 1.014 NA NA
d_openn_1 =~ openn_t2 1.000 1.000 1.000 0.335 NA NA
d_openn_1 ~1 0.019 -0.015 0.053 0.117 1.422 0.155
openn_t1 ~1 3.482 3.399 3.565 7.209 108.489 0.000
d_openn_1 ~~ d_openn_1 0.026 0.003 0.048 1.000 2.902 0.004
openn_t1 ~~ d_openn_1 -0.016 -0.036 0.004 -0.206 -2.040 0.041

No significant changes in openness from pre to post test, b = 0.019, beta = 0.029, p = 0.155.


7 Differences in change across groups (SEM)

We will explore a) whether change in psychological well-being indicators as well as the difference between real- and ideal- self will differ across groups at follow-up.

Create ordered factors first:

df_sbsa3_wide_wb <- df_sbsa3_wide_wb %>% 
  mutate(group = factor(group, ordered = T)) %>% 
  arrange(group, pid)

df_sbsa3_wide_pers <- df_sbsa3_wide_pers %>% 
  mutate(group = factor(group, ordered = T)) %>% 
  arrange(group, pid)

7.1 Group 1 vs. Group 2: Effect of discrepancy awareness

7.1.1 Well-being change: differences across groups

7.1.1.1 Life satisfaction

Life satisfaction: fitting multi-group models

Show the code
# adapt latent change score model from above and add grouping factor in estimation (also add vectorized equality constraints to the model)

# configural invariance
mi_lcs_swls_group_config <- '
swls_t1 =~ 1*sw06_01_t1 + c("lamb2a", "lamb2b")*sw06_02_t1 + c("lamb3a", "lamb3b")*sw06_03_t1 + c("lamb4a", "lamb4b")*sw06_04_t1 # This specifies the measurement model for swls_t1 
swls_t2 =~ 1*sw06_01_t2 + c("lamb2a", "lamb2b")*sw06_02_t2 + c("lamb3a", "lamb3b")*sw06_03_t2 + c("lamb4a", "lamb4b")*sw06_04_t2 # This specifies the measurement model for swls_t2 with the equality constrained factor loadings

swls_t2 ~ 1*swls_t1     # This parameter regresses swls_t2 perfectly on swls_t1
d_swls_1 =~ 1*swls_t2   # This defines the latent change score factor as measured perfectly by scores on swls_t2
swls_t2 ~ 0*1           # This line constrains the intercept of swls_t2 to 0
swls_t2 ~~ 0*swls_t2    # This fixes the variance of swls_t2 to 0

d_swls_1 ~ c("d_int_a", "d_int_b")*1           # This estimates the intercept of the change score 
swls_t1 ~ c("wb_int_a", "wb_int_b")*1            # This estimates the intercept of swls_t1 
d_swls_1 ~~ c("d_var_a", "d_var_b")*d_swls_1   # This estimates the variance of the change scores 
swls_t1 ~~ c("wb_var_a", "wb_var_b")*swls_t1     # This estimates the variance of the swls_t1 
d_swls_1 ~~ c("fb_a", "fb_b")*swls_t1     # This estimates the self-feedback parameter, as a covariance! -> therefore, the interpretation of the change score remains unconditional

sw06_01_t1 ~~ c("cov1a", "cov1b")*sw06_01_t2   # This allows residual covariance on indicator X1 across T1 and T2
sw06_02_t1 ~~ c("cov2a", "cov2b")*sw06_02_t2   # This allows residual covariance on indicator X2 across T1 and T2
sw06_03_t1 ~~ c("cov3a", "cov3b")*sw06_03_t2   # This allows residual covariance on indicator X3 across T1 and T2
sw06_04_t1 ~~ c("cov4a", "cov4b")*sw06_04_t2   # This allows residual covariance on indicator X4 across T1 and T2

sw06_01_t1 ~~ c("res1a", "res1b")*sw06_01_t1   # This allows residual variance on indicator X1 at T1 
sw06_02_t1 ~~ c("res2a", "res2b")*sw06_02_t1   # This allows residual variance on indicator X2 at T1
sw06_03_t1 ~~ c("res3a", "res3b")*sw06_03_t1   # This allows residual variance on indicator X3 at T1
sw06_04_t1 ~~ c("res4a", "res4b")*sw06_04_t1   # This allows residual variance on indicator X4 at T1

sw06_01_t2 ~~ c("res1a", "res1b")*sw06_01_t2  # This allows residual variance on indicator X1 at T2 
sw06_02_t2 ~~ c("res2a", "res2b")*sw06_02_t2  # This allows residual variance on indicator X2 at T2 
sw06_03_t2 ~~ c("res3a", "res3b")*sw06_03_t2  # This allows residual variance on indicator X3 at T2
sw06_04_t2 ~~ c("res4a", "res4b")*sw06_04_t2  # This allows residual variance on indicator X4 at T2

sw06_01_t1 ~ 0*1      # This constrains the intercept of X1 to 0 at T1
sw06_02_t1 ~ c("m2a", "m2b")*1     # This estimates the intercept of X2 at T1
sw06_03_t1 ~ c("m3a", "m3b")*1     # This estimates the intercept of X3 at T1
sw06_04_t1 ~ c("m4a", "m4b")*1     # This estimates the intercept of X4 at T1

sw06_01_t2 ~ 0*1      # This constrains the intercept of X1 to 0 at T2
sw06_02_t2 ~ c("m2a", "m2b")*1     # This estimates the intercept of X2 at T2
sw06_03_t2 ~ c("m3a", "m3b")*1     # This estimates the intercept of X3 at T2
sw06_04_t2 ~ c("m4a", "m4b")*1     # This estimates the intercept of X4 at T2
'
lcs_swls_group_12_config <- sem(mi_lcs_swls_group_config, data=df_sbsa3_wide_wb %>% filter(group!="Group 3"), estimator='mlr', 
                             fixed.x=FALSE, missing='fiml', group = "group")

# weak invariance
mi_lcs_swls_group_weak <- '
swls_t1 =~ 1*sw06_01_t1 + c("lamb2", "lamb2")*sw06_02_t1 + c("lamb3", "lamb3")*sw06_03_t1 + c("lamb4", "lamb4")*sw06_04_t1 # This specifies the measurement model for swls_t1 
swls_t2 =~ 1*sw06_01_t2 + c("lamb2", "lamb2")*sw06_02_t2 + c("lamb3", "lamb3")*sw06_03_t2 + c("lamb4", "lamb4")*sw06_04_t2 # This specifies the measurement model for swls_t2 with the equality constrained factor loadings

swls_t2 ~ 1*swls_t1     # This parameter regresses swls_t2 perfectly on swls_t1
d_swls_1 =~ 1*swls_t2   # This defines the latent change score factor as measured perfectly by scores on swls_t2
swls_t2 ~ 0*1           # This line constrains the intercept of swls_t2 to 0
swls_t2 ~~ 0*swls_t2    # This fixes the variance of swls_t2 to 0

d_swls_1 ~ c("d_int_a", "d_int_b")*1           # This estimates the intercept of the change score 
swls_t1 ~ c("wb_int_a", "wb_int_b")*1            # This estimates the intercept of swls_t1 
d_swls_1 ~~ c("d_var_a", "d_var_b")*d_swls_1   # This estimates the variance of the change scores 
swls_t1 ~~ c("wb_var_a", "wb_var_b")*swls_t1     # This estimates the variance of the swls_t1 
d_swls_1 ~~ c("fb_a", "fb_b")*swls_t1     # This estimates the self-feedback parameter, as a covariance! -> therefore, the interpretation of the change score

sw06_01_t1 ~~ c("cov1a", "cov1b")*sw06_01_t2   # This allows residual covariance on indicator X1 across T1 and T2
sw06_02_t1 ~~ c("cov2a", "cov2b")*sw06_02_t2   # This allows residual covariance on indicator X2 across T1 and T2
sw06_03_t1 ~~ c("cov3a", "cov3b")*sw06_03_t2   # This allows residual covariance on indicator X3 across T1 and T2
sw06_04_t1 ~~ c("cov4a", "cov4b")*sw06_04_t2   # This allows residual covariance on indicator X4 across T1 and T2

sw06_01_t1 ~~ c("res1a", "res1b")*sw06_01_t1   # This allows residual variance on indicator X1 at T1 
sw06_02_t1 ~~ c("res2a", "res2b")*sw06_02_t1   # This allows residual variance on indicator X2 at T1
sw06_03_t1 ~~ c("res3a", "res3b")*sw06_03_t1   # This allows residual variance on indicator X3 at T1
sw06_04_t1 ~~ c("res4a", "res4b")*sw06_04_t1   # This allows residual variance on indicator X4 at T1

sw06_01_t2 ~~ c("res1a", "res1b")*sw06_01_t2  # This allows residual variance on indicator X1 at T2 
sw06_02_t2 ~~ c("res2a", "res2b")*sw06_02_t2  # This allows residual variance on indicator X2 at T2 
sw06_03_t2 ~~ c("res3a", "res3b")*sw06_03_t2  # This allows residual variance on indicator X3 at T2
sw06_04_t2 ~~ c("res4a", "res4b")*sw06_04_t2  # This allows residual variance on indicator X4 at T2

sw06_01_t1 ~ 0*1      # This constrains the intercept of X1 to 0 at T1
sw06_02_t1 ~ c("m2a", "m2b")*1     # This estimates the intercept of X2 at T1
sw06_03_t1 ~ c("m3a", "m3b")*1     # This estimates the intercept of X3 at T1
sw06_04_t1 ~ c("m4a", "m4b")*1     # This estimates the intercept of X4 at T1

sw06_01_t2 ~ 0*1      # This constrains the intercept of X1 to 0 at T2
sw06_02_t2 ~ c("m2a", "m2b")*1     # This estimates the intercept of X2 at T2
sw06_03_t2 ~ c("m3a", "m3b")*1     # This estimates the intercept of X3 at T2
sw06_04_t2 ~ c("m4a", "m4b")*1     # This estimates the intercept of X4 at T2
'

lcs_swls_group_12_weak <- sem(mi_lcs_swls_group_weak, data=df_sbsa3_wide_wb %>% filter(group!="Group 3"), estimator='mlr', fixed.x=FALSE, missing='fiml', 
                           group = "group", group.equal = "loadings")

# strong invariance
mi_lcs_swls_group_strong <- '
swls_t1 =~ 1*sw06_01_t1 + c("lamb2", "lamb2")*sw06_02_t1 + c("lamb3", "lamb3")*sw06_03_t1 + c("lamb4", "lamb4")*sw06_04_t1 # This specifies the measurement model for swls_t1 
swls_t2 =~ 1*sw06_01_t2 + c("lamb2", "lamb2")*sw06_02_t2 + c("lamb3", "lamb3")*sw06_03_t2 + c("lamb4", "lamb4")*sw06_04_t2 # This specifies the measurement model for swls_t2 with the equality constrained factor loadings

swls_t2 ~ 1*swls_t1     # This parameter regresses swls_t2 perfectly on swls_t1
d_swls_1 =~ 1*swls_t2   # This defines the latent change score factor as measured perfectly by scores on swls_t2
swls_t2 ~ 0*1           # This line constrains the intercept of swls_t2 to 0
swls_t2 ~~ 0*swls_t2    # This fixes the variance of swls_t2 to 0

d_swls_1 ~ c("d_int", "d_int")*1           # This estimates the intercept of the change score 
swls_t1 ~ c("wb_int", "wb_int")*1            # This estimates the intercept of swls_t1 
d_swls_1 ~~ c("d_var_a", "d_var_b")*d_swls_1   # This estimates the variance of the change scores 
swls_t1 ~~ c("wb_var_a", "wb_var_b")*swls_t1     # This estimates the variance of the swls_t1 
d_swls_1 ~~ c("fb_a", "fb_b")*swls_t1     # This estimates the self-feedback parameter, as a covariance! -> therefore, the interpretation of the change score

sw06_01_t1 ~~ c("cov1a", "cov1b")*sw06_01_t2   # This allows residual covariance on indicator X1 across T1 and T2
sw06_02_t1 ~~ c("cov2a", "cov2b")*sw06_02_t2   # This allows residual covariance on indicator X2 across T1 and T2
sw06_03_t1 ~~ c("cov3a", "cov3b")*sw06_03_t2   # This allows residual covariance on indicator X3 across T1 and T2
sw06_04_t1 ~~ c("cov4a", "cov4b")*sw06_04_t2   # This allows residual covariance on indicator X4 across T1 and T2

sw06_01_t1 ~~ c("res1a", "res1b")*sw06_01_t1   # This allows residual variance on indicator X1 at T1 
sw06_02_t1 ~~ c("res2a", "res2b")*sw06_02_t1   # This allows residual variance on indicator X2 at T1
sw06_03_t1 ~~ c("res3a", "res3b")*sw06_03_t1   # This allows residual variance on indicator X3 at T1
sw06_04_t1 ~~ c("res4a", "res4b")*sw06_04_t1   # This allows residual variance on indicator X4 at T1

sw06_01_t2 ~~ c("res1a", "res1b")*sw06_01_t2  # This allows residual variance on indicator X1 at T2 
sw06_02_t2 ~~ c("res2a", "res2b")*sw06_02_t2  # This allows residual variance on indicator X2 at T2 
sw06_03_t2 ~~ c("res3a", "res3b")*sw06_03_t2  # This allows residual variance on indicator X3 at T2
sw06_04_t2 ~~ c("res4a", "res4b")*sw06_04_t2  # This allows residual variance on indicator X4 at T2

sw06_01_t1 ~ 0*1      # This constrains the intercept of X1 to 0 at T1
sw06_02_t1 ~ c("m2", "m2")*1     # This estimates the intercept of X2 at T1
sw06_03_t1 ~ c("m3", "m3")*1     # This estimates the intercept of X3 at T1
sw06_04_t1 ~ c("m4", "m4")*1     # This estimates the intercept of X4 at T1

sw06_01_t2 ~ 0*1      # This constrains the intercept of X1 to 0 at T2
sw06_02_t2 ~ c("m2", "m2")*1     # This estimates the intercept of X2 at T2
sw06_03_t2 ~ c("m3", "m3")*1     # This estimates the intercept of X3 at T2
sw06_04_t2 ~ c("m4", "m4")*1     # This estimates the intercept of X4 at T2
'

lcs_swls_group_12_strong <- sem(mi_lcs_swls_group_strong, data=df_sbsa3_wide_wb %>% filter(group!="Group 3"), estimator='mlr', fixed.x=FALSE, missing='fiml', 
                             group = "group", group.equal = c("intercepts", "loadings"))

Life satisfaction: results

# model comparison tests for measurement invariance
lavTestLRT(lcs_swls_group_12_config, lcs_swls_group_12_weak, lcs_swls_group_12_strong)

Scaled Chi-Squared Difference Test (method = "satorra.bentler.2001")

lavaan NOTE:
    The "Chisq" column contains standard test statistics, not the
    robust test that should be reported per model. A robust difference
    test is a function of two standard (not robust) statistics.
 
                         Df    AIC    BIC   Chisq Chisq diff Df diff Pr(>Chisq)
lcs_swls_group_12_config 50 6867.5 7014.6  94.812                              
lcs_swls_group_12_weak   53 6865.6 7001.0  98.864     4.2444       3     0.2363
lcs_swls_group_12_strong 58 6862.7 6978.8 106.007     7.1600       5     0.2090
# show model with varying latent change parameters 
# -> key parameter is "d_swls_1 ~1"
# labelled parameter as "d_int_a" & "d_int_b" (a = Group 1, b = Group 2)
params_lcs_swls_group_12_weak <- broom::tidy(lcs_swls_group_12_weak, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_wb_st3 %>% filter(trait=="swls" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_swls_1 ~1 ")) %>% mutate(sig_diff = " ")
kable(params_lcs_swls_group_12_weak, digits = 3) 
term label estimate beta std.all statistic p.value sig_diff
d_swls_1 ~1 d_int_a 0.187 0.186 0.317 3.237 0.001
d_swls_1 ~1 d_int_b 0.259 0.258 0.512 4.843 0.000
# constrained to be equal in the strong measurement invariance model:
kable(broom::tidy(lcs_swls_group_12_strong, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_wb_st3 %>% filter(trait=="swls" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_swls_1 ~1 ")), digits = 3) 
term label estimate beta std.all statistic p.value
d_swls_1 ~1 d_int 0.228 0.227 0.385 5.83 0
d_swls_1 ~1 d_int 0.228 0.227 0.448 5.83 0

Slightly more positive change in life satisfaction in Group 2 compared to Group 1 but no substantial differences according to the LRTs.

# whole model (weak invariance)
summary(lcs_swls_group_12_weak, fit.measures=TRUE, standardized=TRUE, rsquare=F)
lavaan 0.6.15 ended normally after 55 iterations

  Estimator                                         ML
  Optimization method                           NLMINB
  Number of model parameters                        58
  Number of equality constraints                    23

  Number of observations per group:                   
    Group 1                                        175
    Group 2                                        179
  Number of missing patterns per group:               
    Group 1                                          3
    Group 2                                          2

Model Test User Model:
                                              Standard      Scaled
  Test Statistic                                98.864      91.108
  Degrees of freedom                                53          53
  P-value (Chi-square)                           0.000       0.001
  Scaling correction factor                                  1.085
    Yuan-Bentler correction (Mplus variant)                       
  Test statistic for each group:
    Group 1                                     43.450      40.041
    Group 2                                     55.414      51.066

Model Test Baseline Model:

  Test statistic                              1778.340    1361.324
  Degrees of freedom                                56          56
  P-value                                        0.000       0.000
  Scaling correction factor                                  1.306

User Model versus Baseline Model:

  Comparative Fit Index (CFI)                    0.973       0.971
  Tucker-Lewis Index (TLI)                       0.972       0.969
                                                                  
  Robust Comparative Fit Index (CFI)                         0.976
  Robust Tucker-Lewis Index (TLI)                            0.975

Loglikelihood and Information Criteria:

  Loglikelihood user model (H0)              -3397.791   -3397.791
  Scaling correction factor                                  0.671
      for the MLR correction                                      
  Loglikelihood unrestricted model (H1)      -3348.359   -3348.359
  Scaling correction factor                                  1.096
      for the MLR correction                                      
                                                                  
  Akaike (AIC)                                6865.582    6865.582
  Bayesian (BIC)                              7001.007    7001.007
  Sample-size adjusted Bayesian (SABIC)       6889.972    6889.972

Root Mean Square Error of Approximation:

  RMSEA                                          0.070       0.064
  90 Percent confidence interval - lower         0.048       0.042
  90 Percent confidence interval - upper         0.091       0.085
  P-value H_0: RMSEA <= 0.050                    0.065       0.141
  P-value H_0: RMSEA >= 0.080                    0.229       0.104
                                                                  
  Robust RMSEA                                               0.069
  90 Percent confidence interval - lower                     0.043
  90 Percent confidence interval - upper                     0.093
  P-value H_0: Robust RMSEA <= 0.050                         0.108
  P-value H_0: Robust RMSEA >= 0.080                         0.238

Standardized Root Mean Square Residual:

  SRMR                                           0.047       0.047

Parameter Estimates:

  Standard errors                             Sandwich
  Information bread                           Observed
  Observed information based on                Hessian


Group 1 [Group 1]:

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  swls_t1 =~                                                            
    s06_01_           1.000                               0.990    0.831
    s06_02_ (lmb2)    0.957    0.040   24.191    0.000    0.947    0.793
    s06_03_ (lmb3)    1.033    0.049   21.086    0.000    1.022    0.844
    s06_04_ (lmb4)    0.855    0.054   15.877    0.000    0.846    0.649
  swls_t2 =~                                                            
    s06_01_           1.000                               0.953    0.821
    s06_02_ (lmb2)    0.957    0.040   24.191    0.000    0.912    0.782
    s06_03_ (lmb3)    1.033    0.049   21.086    0.000    0.985    0.835
    s06_04_ (lmb4)    0.855    0.054   15.877    0.000    0.815    0.635
  d_swls_1 =~                                                           
    swls_t2           1.000                               0.617    0.617

Regressions:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  swls_t2 ~                                                             
    swls_t1           1.000                               1.038    1.038

Covariances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  swls_t1 ~~                                                            
    d_swl_1 (fb_a)   -0.209    0.055   -3.795    0.000   -0.358   -0.358
 .sw06_01_t1 ~~                                                         
   .s06_01_ (cov1)    0.176    0.064    2.739    0.006    0.176    0.402
 .sw06_02_t1 ~~                                                         
   .s06_02_ (cov2)    0.228    0.066    3.438    0.001    0.228    0.431
 .sw06_03_t1 ~~                                                         
   .s06_03_ (cov3)    0.113    0.059    1.899    0.058    0.113    0.267
 .sw06_04_t1 ~~                                                         
   .s06_04_ (cov4)    0.406    0.088    4.611    0.000    0.406    0.414

Intercepts:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .swls_t2           0.000                               0.000    0.000
    d_swl_1 (d_n_)    0.187    0.058    3.237    0.001    0.317    0.317
    swls_t1 (wb__)    2.801    0.089   31.293    0.000    2.830    2.830
   .s06_01_           0.000                               0.000    0.000
   .s06_02_  (m2a)    0.151    0.130    1.164    0.245    0.151    0.126
   .s06_03_  (m3a)    0.021    0.156    0.133    0.894    0.021    0.017
   .s06_04_  (m4a)    0.525    0.166    3.164    0.002    0.525    0.403
   .s06_01_           0.000                               0.000    0.000
   .s06_02_  (m2a)    0.151    0.130    1.164    0.245    0.151    0.129
   .s06_03_  (m3a)    0.021    0.156    0.133    0.894    0.021    0.018
   .s06_04_  (m4a)    0.525    0.166    3.164    0.002    0.525    0.409

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .swls_t2           0.000                               0.000    0.000
    d_swl_1 (d_v_)    0.346    0.087    3.960    0.000    1.000    1.000
    swls_t1 (wb__)    0.979    0.104    9.396    0.000    1.000    1.000
   .s06_01_ (res1)    0.439    0.063    7.015    0.000    0.439    0.309
   .s06_02_ (res2)    0.530    0.063    8.458    0.000    0.530    0.371
   .s06_03_ (res3)    0.421    0.067    6.306    0.000    0.421    0.287
   .s06_04_ (res4)    0.982    0.100    9.841    0.000    0.982    0.578
   .s06_01_ (res1)    0.439    0.063    7.015    0.000    0.439    0.326
   .s06_02_ (res2)    0.530    0.063    8.458    0.000    0.530    0.389
   .s06_03_ (res3)    0.421    0.067    6.306    0.000    0.421    0.303
   .s06_04_ (res4)    0.982    0.100    9.841    0.000    0.982    0.596


Group 2 [Group 2]:

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  swls_t1 =~                                                            
    s06_01_           1.000                               0.993    0.827
    s06_02_ (lmb2)    0.957    0.040   24.191    0.000    0.951    0.800
    s06_03_ (lmb3)    1.033    0.049   21.086    0.000    1.026    0.857
    s06_04_ (lmb4)    0.855    0.054   15.877    0.000    0.850    0.703
  swls_t2 =~                                                            
    s06_01_           1.000                               1.001    0.829
    s06_02_ (lmb2)    0.957    0.040   24.191    0.000    0.958    0.802
    s06_03_ (lmb3)    1.033    0.049   21.086    0.000    1.034    0.858
    s06_04_ (lmb4)    0.855    0.054   15.877    0.000    0.856    0.706
  d_swls_1 =~                                                           
    swls_t2           1.000                               0.506    0.506

Regressions:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  swls_t2 ~                                                             
    swls_t1           1.000                               0.992    0.992

Covariances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  swls_t1 ~~                                                            
    d_swl_1 (fb_b)   -0.121    0.053   -2.292    0.022   -0.240   -0.240
 .sw06_01_t1 ~~                                                         
   .s06_01_ (cv1b)    0.176    0.058    3.059    0.002    0.176    0.386
 .sw06_02_t1 ~~                                                         
   .s06_02_ (cv2b)    0.195    0.061    3.210    0.001    0.195    0.383
 .sw06_03_t1 ~~                                                         
   .s06_03_ (cv3b)    0.030    0.055    0.539    0.590    0.030    0.077
 .sw06_04_t1 ~~                                                         
   .s06_04_ (cv4b)    0.207    0.076    2.724    0.006    0.207    0.280

Intercepts:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .swls_t2           0.000                               0.000    0.000
    d_swl_1 (d_n_)    0.259    0.053    4.843    0.000    0.512    0.512
    swls_t1 (wb__)    2.544    0.087   29.309    0.000    2.561    2.561
   .s06_01_           0.000                               0.000    0.000
   .s06_02_  (m2b)    0.274    0.122    2.251    0.024    0.274    0.231
   .s06_03_  (m3b)    0.198    0.143    1.387    0.165    0.198    0.166
   .s06_04_  (m4b)    0.573    0.158    3.632    0.000    0.573    0.475
   .s06_01_           0.000                               0.000    0.000
   .s06_02_  (m2b)    0.274    0.122    2.251    0.024    0.274    0.230
   .s06_03_  (m3b)    0.198    0.143    1.387    0.165    0.198    0.165
   .s06_04_  (m4b)    0.573    0.158    3.632    0.000    0.573    0.473

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .swls_t2           0.000                               0.000    0.000
    d_swl_1 (d_v_)    0.257    0.057    4.467    0.000    1.000    1.000
    swls_t1 (wb__)    0.987    0.105    9.396    0.000    1.000    1.000
   .s06_01_ (rs1b)    0.456    0.063    7.279    0.000    0.456    0.316
   .s06_02_ (rs2b)    0.508    0.061    8.280    0.000    0.508    0.360
   .s06_03_ (rs3b)    0.382    0.056    6.784    0.000    0.382    0.266
   .s06_04_ (rs4b)    0.737    0.080    9.220    0.000    0.737    0.505
   .s06_01_ (rs1b)    0.456    0.063    7.279    0.000    0.456    0.313
   .s06_02_ (rs2b)    0.508    0.061    8.280    0.000    0.508    0.356
   .s06_03_ (rs3b)    0.382    0.056    6.784    0.000    0.382    0.263
   .s06_04_ (rs4b)    0.737    0.080    9.220    0.000    0.737    0.501
7.1.1.2 Meaning in life

Meaning in life: fitting multi-group models

Show the code
# adapt latent change score model from above and add grouping factor in estimation (also add vectorized equality constraints to the model)

# configural invariance
mi_lcs_meaning_group_config <- '
meaning_t1 =~ 1*ml01_01_t1 + c("lamb2a", "lamb2b")*ml01_04_t1 + c("lamb3a", "lamb3b")*ml01_05_t1 + c("lamb4a", "lamb4b")*ml01_06_t1 + c("lamb5a", "lamb5b")*ml01_09_t1 # This specifies the measurement model for meaning_t1 
meaning_t2 =~ 1*ml01_01_t2 + c("lamb2a", "lamb2b")*ml01_04_t2 + c("lamb3a", "lamb3b")*ml01_05_t2 + c("lamb4a", "lamb4b")*ml01_06_t2 + c("lamb5a", "lamb5b")*ml01_09_t2 # This specifies the measurement model for meaning_t2 with the equality constrained factor loadings

meaning_t2 ~ 1*meaning_t1     # This parameter regresses meaning_t2 perfectly on meaning_t1
d_meaning_1 =~ 1*meaning_t2   # This defines the latent change score factor as measured perfectly by scores on meaning_t2
meaning_t2 ~ 0*1           # This line constrains the intercept of meaning_t2 to 0
meaning_t2 ~~ 0*meaning_t2    # This fixes the variance of meaning_t2 to 0

d_meaning_1 ~ c("d_int_a", "d_int_b")*1           # This estimates the intercept of the change score 
meaning_t1 ~ c("wb_int_a", "wb_int_b")*1            # This estimates the intercept of meaning_t1 
d_meaning_1 ~~ c("d_var_a", "d_var_b")*d_meaning_1   # This estimates the variance of the change scores 
meaning_t1 ~~ c("wb_var_a", "wb_var_b")*meaning_t1     # This estimates the variance of the meaning_t1 
d_meaning_1 ~~ c("fb_a", "fb_b")*meaning_t1     # This estimates the self-feedback parameter, as a covariance! -> therefore, the interpretation of the change score

ml01_01_t1 ~~ c("cov1a", "cov1b")*ml01_01_t2   # This allows residual covariance on indicator X1 across T1 and T2
ml01_04_t1 ~~ c("cov2a", "cov2b")*ml01_04_t2   # This allows residual covariance on indicator X2 across T1 and T2
ml01_05_t1 ~~ c("cov3a", "cov3b")*ml01_05_t2   # This allows residual covariance on indicator X3 across T1 and T2
ml01_06_t1 ~~ c("cov4a", "cov4b")*ml01_06_t2   # This allows residual covariance on indicator X4 across T1 and T2
ml01_09_t1 ~~ c("cov5a", "cov5b")*ml01_09_t2   # This allows residual covariance on indicator X5 across T1 and T2

ml01_01_t1 ~~ c("res1a", "res1b")*ml01_01_t1   # This allows residual variance on indicator X1 at T1 
ml01_04_t1 ~~ c("res2a", "res2b")*ml01_04_t1   # This allows residual variance on indicator X2 at T1
ml01_05_t1 ~~ c("res3a", "res3b")*ml01_05_t1   # This allows residual variance on indicator X3 at T1
ml01_06_t1 ~~ c("res4a", "res4b")*ml01_06_t1   # This allows residual variance on indicator X4 at T1
ml01_09_t1 ~~ c("res5a", "res5b")*ml01_09_t1   # This allows residual variance on indicator X5 at T1

ml01_01_t2 ~~ c("res1a", "res1b")*ml01_01_t2  # This allows residual variance on indicator X1 at T2 
ml01_04_t2 ~~ c("res2a", "res2b")*ml01_04_t2  # This allows residual variance on indicator X2 at T2 
ml01_05_t2 ~~ c("res3a", "res3b")*ml01_05_t2  # This allows residual variance on indicator X3 at T2
ml01_06_t2 ~~ c("res4a", "res4b")*ml01_06_t2  # This allows residual variance on indicator X4 at T2
ml01_09_t2 ~~ c("res5a", "res5b")*ml01_09_t2  # This allows residual variance on indicator X5 at T2

ml01_01_t1 ~ 0*1      # This constrains the intercept of X1 to 0 at T1
ml01_04_t1 ~ c("m2a", "m2b")*1     # This estimates the intercept of X2 at T1
ml01_05_t1 ~ c("m3a", "m3b")*1     # This estimates the intercept of X3 at T1
ml01_06_t1 ~ c("m4a", "m4b")*1     # This estimates the intercept of X4 at T1
ml01_09_t1 ~ c("m5a", "m5b")*1     # This estimates the intercept of X5 at T1

ml01_01_t2 ~ 0*1      # This constrains the intercept of X1 to 0 at T2
ml01_04_t2 ~ c("m2a", "m2b")*1     # This estimates the intercept of X2 at T2
ml01_05_t2 ~ c("m3a", "m3b")*1     # This estimates the intercept of X3 at T2
ml01_06_t2 ~ c("m4a", "m4b")*1     # This estimates the intercept of X4 at T2
ml01_09_t2 ~ c("m5a", "m5b")*1     # This estimates the intercept of X5 at T2
'
lcs_meaning_group_12_config <- sem(mi_lcs_meaning_group_config, data=df_sbsa3_wide_wb %>% filter(group!="Group 3"), estimator='mlr', 
                                fixed.x=FALSE, missing='fiml', group = "group")

# weak invariance
mi_lcs_meaning_group_weak <- '
meaning_t1 =~ 1*ml01_01_t1 + c("lamb2", "lamb2")*ml01_04_t1 + c("lamb3", "lamb3")*ml01_05_t1 + c("lamb4", "lamb4")*ml01_06_t1 + c("lamb5", "lamb5")*ml01_09_t1 # This specifies the measurement model for meaning_t1 
meaning_t2 =~ 1*ml01_01_t2 + c("lamb2", "lamb2")*ml01_04_t2 + c("lamb3", "lamb3")*ml01_05_t2 + c("lamb4", "lamb4")*ml01_06_t2 + c("lamb5", "lamb5")*ml01_09_t2 # This specifies the measurement model for meaning_t2 with the equality constrained factor loadings

meaning_t2 ~ 1*meaning_t1     # This parameter regresses meaning_t2 perfectly on meaning_t1
d_meaning_1 =~ 1*meaning_t2   # This defines the latent change score factor as measured perfectly by scores on meaning_t2
meaning_t2 ~ 0*1           # This line constrains the intercept of meaning_t2 to 0
meaning_t2 ~~ 0*meaning_t2    # This fixes the variance of meaning_t2 to 0

d_meaning_1 ~ c("d_int_a", "d_int_b")*1           # This estimates the intercept of the change score 
meaning_t1 ~ c("wb_int_a", "wb_int_b")*1            # This estimates the intercept of meaning_t1 
d_meaning_1 ~~ c("d_var_a", "d_var_b")*d_meaning_1   # This estimates the variance of the change scores 
meaning_t1 ~~ c("wb_var_a", "wb_var_b")*meaning_t1     # This estimates the variance of the meaning_t1 
d_meaning_1 ~~ c("fb_a", "fb_b")*meaning_t1     # This estimates the self-feedback parameter, as a covariance! -> therefore, the interpretation of the change score

ml01_01_t1 ~~ c("cov1a", "cov1b")*ml01_01_t2   # This allows residual covariance on indicator X1 across T1 and T2
ml01_04_t1 ~~ c("cov2a", "cov2b")*ml01_04_t2   # This allows residual covariance on indicator X2 across T1 and T2
ml01_05_t1 ~~ c("cov3a", "cov3b")*ml01_05_t2   # This allows residual covariance on indicator X3 across T1 and T2
ml01_06_t1 ~~ c("cov4a", "cov4b")*ml01_06_t2   # This allows residual covariance on indicator X4 across T1 and T2
ml01_09_t1 ~~ c("cov5a", "cov5b")*ml01_09_t2   # This allows residual covariance on indicator X5 across T1 and T2

ml01_01_t1 ~~ c("res1a", "res1b")*ml01_01_t1   # This allows residual variance on indicator X1 at T1 
ml01_04_t1 ~~ c("res2a", "res2b")*ml01_04_t1   # This allows residual variance on indicator X2 at T1
ml01_05_t1 ~~ c("res3a", "res3b")*ml01_05_t1   # This allows residual variance on indicator X3 at T1
ml01_06_t1 ~~ c("res4a", "res4b")*ml01_06_t1   # This allows residual variance on indicator X4 at T1
ml01_09_t1 ~~ c("res5a", "res5b")*ml01_09_t1   # This allows residual variance on indicator X5 at T1

ml01_01_t2 ~~ c("res1a", "res1b")*ml01_01_t2  # This allows residual variance on indicator X1 at T2 
ml01_04_t2 ~~ c("res2a", "res2b")*ml01_04_t2  # This allows residual variance on indicator X2 at T2 
ml01_05_t2 ~~ c("res3a", "res3b")*ml01_05_t2  # This allows residual variance on indicator X3 at T2
ml01_06_t2 ~~ c("res4a", "res4b")*ml01_06_t2  # This allows residual variance on indicator X4 at T2
ml01_09_t2 ~~ c("res5a", "res5b")*ml01_09_t2  # This allows residual variance on indicator X5 at T2

ml01_01_t1 ~ 0*1      # This constrains the intercept of X1 to 0 at T1
ml01_04_t1 ~ c("m2a", "m2b")*1     # This estimates the intercept of X2 at T1
ml01_05_t1 ~ c("m3a", "m3b")*1     # This estimates the intercept of X3 at T1
ml01_06_t1 ~ c("m4a", "m4b")*1     # This estimates the intercept of X4 at T1
ml01_09_t1 ~ c("m5a", "m5b")*1     # This estimates the intercept of X5 at T1

ml01_01_t2 ~ 0*1      # This constrains the intercept of X1 to 0 at T2
ml01_04_t2 ~ c("m2a", "m2b")*1     # This estimates the intercept of X2 at T2
ml01_05_t2 ~ c("m3a", "m3b")*1     # This estimates the intercept of X3 at T2
ml01_06_t2 ~ c("m4a", "m4b")*1     # This estimates the intercept of X4 at T2
ml01_09_t2 ~ c("m5a", "m5b")*1     # This estimates the intercept of X5 at T2
'

lcs_meaning_group_12_weak <- sem(mi_lcs_meaning_group_weak, data=df_sbsa3_wide_wb %>% filter(group!="Group 3"), estimator='mlr', fixed.x=FALSE, missing='fiml', 
                           group = "group", group.equal = "loadings")

# strong invariance
mi_lcs_meaning_group_strong <- '
meaning_t1 =~ 1*ml01_01_t1 + c("lamb2", "lamb2")*ml01_04_t1 + c("lamb3", "lamb3")*ml01_05_t1 + c("lamb4", "lamb4")*ml01_06_t1 + c("lamb5", "lamb5")*ml01_09_t1 # This specifies the measurement model for meaning_t1 
meaning_t2 =~ 1*ml01_01_t2 + c("lamb2", "lamb2")*ml01_04_t2 + c("lamb3", "lamb3")*ml01_05_t2 + c("lamb4", "lamb4")*ml01_06_t2 + c("lamb5", "lamb5")*ml01_09_t2 # This specifies the measurement model for meaning_t2 with the equality constrained factor loadings

meaning_t2 ~ 1*meaning_t1     # This parameter regresses meaning_t2 perfectly on meaning_t1
d_meaning_1 =~ 1*meaning_t2   # This defines the latent change score factor as measured perfectly by scores on meaning_t2
meaning_t2 ~ 0*1           # This line constrains the intercept of meaning_t2 to 0
meaning_t2 ~~ 0*meaning_t2    # This fixes the variance of meaning_t2 to 0

d_meaning_1 ~ c("d_int", "d_int")*1           # This estimates the intercept of the change score 
meaning_t1 ~ c("wb_int", "wb_int")*1            # This estimates the intercept of meaning_t1 
d_meaning_1 ~~ c("d_var_a", "d_var_b")*d_meaning_1   # This estimates the variance of the change scores 
meaning_t1 ~~ c("wb_var_a", "wb_var_b")*meaning_t1     # This estimates the variance of the meaning_t1 
d_meaning_1 ~~ c("fb_a", "fb_b")*meaning_t1     # This estimates the self-feedback parameter, as a covariance! -> therefore, the interpretation of the change score

ml01_01_t1 ~~ c("cov1a", "cov1b")*ml01_01_t2   # This allows residual covariance on indicator X1 across T1 and T2
ml01_04_t1 ~~ c("cov2a", "cov2b")*ml01_04_t2   # This allows residual covariance on indicator X2 across T1 and T2
ml01_05_t1 ~~ c("cov3a", "cov3b")*ml01_05_t2   # This allows residual covariance on indicator X3 across T1 and T2
ml01_06_t1 ~~ c("cov4a", "cov4b")*ml01_06_t2   # This allows residual covariance on indicator X4 across T1 and T2
ml01_09_t1 ~~ c("cov5a", "cov5b")*ml01_09_t2   # This allows residual covariance on indicator X5 across T1 and T2

ml01_01_t1 ~~ c("res1a", "res1b")*ml01_01_t1   # This allows residual variance on indicator X1 at T1 
ml01_04_t1 ~~ c("res2a", "res2b")*ml01_04_t1   # This allows residual variance on indicator X2 at T1
ml01_05_t1 ~~ c("res3a", "res3b")*ml01_05_t1   # This allows residual variance on indicator X3 at T1
ml01_06_t1 ~~ c("res4a", "res4b")*ml01_06_t1   # This allows residual variance on indicator X4 at T1
ml01_09_t1 ~~ c("res5a", "res5b")*ml01_09_t1   # This allows residual variance on indicator X5 at T1

ml01_01_t2 ~~ c("res1a", "res1b")*ml01_01_t2  # This allows residual variance on indicator X1 at T2 
ml01_04_t2 ~~ c("res2a", "res2b")*ml01_04_t2  # This allows residual variance on indicator X2 at T2 
ml01_05_t2 ~~ c("res3a", "res3b")*ml01_05_t2  # This allows residual variance on indicator X3 at T2
ml01_06_t2 ~~ c("res4a", "res4b")*ml01_06_t2  # This allows residual variance on indicator X4 at T2
ml01_09_t2 ~~ c("res5a", "res5b")*ml01_09_t2  # This allows residual variance on indicator X5 at T2

ml01_01_t1 ~ 0*1      # This constrains the intercept of X1 to 0 at T1
ml01_04_t1 ~ c("m2", "m2")*1     # This estimates the intercept of X2 at T1
ml01_05_t1 ~ c("m3", "m3")*1     # This estimates the intercept of X3 at T1
ml01_06_t1 ~ c("m4", "m4")*1     # This estimates the intercept of X4 at T1
ml01_09_t1 ~ c("m5", "m5")*1     # This estimates the intercept of X5 at T1

ml01_01_t2 ~ 0*1      # This constrains the intercept of X1 to 0 at T2
ml01_04_t2 ~ c("m2", "m2")*1     # This estimates the intercept of X2 at T2
ml01_05_t2 ~ c("m3", "m3")*1     # This estimates the intercept of X3 at T2
ml01_06_t2 ~ c("m4", "m4")*1     # This estimates the intercept of X4 at T2
ml01_09_t2 ~ c("m5", "m5")*1     # This estimates the intercept of X5 at T2
'

lcs_meaning_group_12_strong <- sem(mi_lcs_meaning_group_strong, data=df_sbsa3_wide_wb %>% filter(group!="Group 3"), estimator='mlr', fixed.x=FALSE, missing='fiml', 
                             group = "group", group.equal = c("intercepts", "loadings"))

Meaning in life: results

# model comparison tests for measurement invariance
lavTestLRT(lcs_meaning_group_12_config, lcs_meaning_group_12_weak, lcs_meaning_group_12_strong)

Scaled Chi-Squared Difference Test (method = "satorra.bentler.2001")

lavaan NOTE:
    The "Chisq" column contains standard test statistics, not the
    robust test that should be reported per model. A robust difference
    test is a function of two standard (not robust) statistics.
 
                            Df   AIC   BIC  Chisq Chisq diff Df diff Pr(>Chisq)
lcs_meaning_group_12_config 84 10375 10553 144.86                              
lcs_meaning_group_12_weak   88 10368 10531 145.63     0.7944       4     0.9392
lcs_meaning_group_12_strong 94 10365 10504 154.05     8.4484       6     0.2071
# show model with varying latent change parameters 
# -> key parameter is "d_meaning_1 ~1"
# labelled parameter as "d_int_a" & "d_int_b" (a = Group 1, b = Group 2)
params_lcs_meaning_group_12_weak <- broom::tidy(lcs_meaning_group_12_weak, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_wb_st3 %>% filter(trait=="meaning" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_meaning_1 ~1 ")) %>% mutate(sig_diff = " ")
kable(params_lcs_meaning_group_12_weak, digits = 3) 
term label estimate beta std.all statistic p.value sig_diff
d_meaning_1 ~1 d_int_a 0.091 0.061 0.110 1.173 0.241
d_meaning_1 ~1 d_int_b 0.079 0.053 0.093 0.987 0.324
# constrained to be equal in the strong measurement invariance model:
kable(broom::tidy(lcs_meaning_group_12_strong, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_wb_st3 %>% filter(trait=="meaning" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_meaning_1 ~1 ")), digits = 3) 
term label estimate beta std.all statistic p.value
d_meaning_1 ~1 d_int 0.084 0.057 0.102 1.505 0.132
d_meaning_1 ~1 d_int 0.084 0.057 0.099 1.505 0.132

Very similar degree of positive change in meaning in life in both groups and no substantial differences according to the LRTs.

# whole model (weak invariance)
summary(lcs_meaning_group_12_weak, fit.measures=TRUE, standardized=TRUE, rsquare=F)
lavaan 0.6.15 ended normally after 88 iterations

  Estimator                                         ML
  Optimization method                           NLMINB
  Number of model parameters                        72
  Number of equality constraints                    30

  Number of observations per group:                   
    Group 1                                        175
    Group 2                                        179
  Number of missing patterns per group:               
    Group 1                                          3
    Group 2                                          2

Model Test User Model:
                                              Standard      Scaled
  Test Statistic                               145.625     126.743
  Degrees of freedom                                88          88
  P-value (Chi-square)                           0.000       0.004
  Scaling correction factor                                  1.149
    Yuan-Bentler correction (Mplus variant)                       
  Test statistic for each group:
    Group 1                                     75.166      65.420
    Group 2                                     70.459      61.323

Model Test Baseline Model:

  Test statistic                              2701.949    1979.178
  Degrees of freedom                                90          90
  P-value                                        0.000       0.000
  Scaling correction factor                                  1.365

User Model versus Baseline Model:

  Comparative Fit Index (CFI)                    0.978       0.979
  Tucker-Lewis Index (TLI)                       0.977       0.979
                                                                  
  Robust Comparative Fit Index (CFI)                         0.984
  Robust Tucker-Lewis Index (TLI)                            0.984

Loglikelihood and Information Criteria:

  Loglikelihood user model (H0)              -5142.105   -5142.105
  Scaling correction factor                                  0.729
      for the MLR correction                                      
  Loglikelihood unrestricted model (H1)      -5069.293   -5069.293
  Scaling correction factor                                  1.181
      for the MLR correction                                      
                                                                  
  Akaike (AIC)                               10368.211   10368.211
  Bayesian (BIC)                             10530.721   10530.721
  Sample-size adjusted Bayesian (SABIC)      10397.479   10397.479

Root Mean Square Error of Approximation:

  RMSEA                                          0.061       0.050
  90 Percent confidence interval - lower         0.043       0.030
  90 Percent confidence interval - upper         0.078       0.067
  P-value H_0: RMSEA <= 0.050                    0.152       0.488
  P-value H_0: RMSEA >= 0.080                    0.032       0.001
                                                                  
  Robust RMSEA                                               0.053
  90 Percent confidence interval - lower                     0.027
  90 Percent confidence interval - upper                     0.075
  P-value H_0: Robust RMSEA <= 0.050                         0.387
  P-value H_0: Robust RMSEA >= 0.080                         0.022

Standardized Root Mean Square Residual:

  SRMR                                           0.043       0.043

Parameter Estimates:

  Standard errors                             Sandwich
  Information bread                           Observed
  Observed information based on                Hessian


Group 1 [Group 1]:

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  meaning_t1 =~                                                         
    m01_01_           1.000                               1.358    0.841
    m01_04_ (lmb2)    1.055    0.039   26.764    0.000    1.432    0.865
    m01_05_ (lmb3)    0.976    0.039   25.338    0.000    1.325    0.848
    m01_06_ (lmb4)    1.030    0.042   24.518    0.000    1.399    0.842
    m01_09_ (lmb5)   -1.048    0.049  -21.269    0.000   -1.423   -0.747
  meaning_t2 =~                                                         
    m01_01_           1.000                               1.370    0.843
    m01_04_ (lmb2)    1.055    0.039   26.764    0.000    1.445    0.867
    m01_05_ (lmb3)    0.976    0.039   25.338    0.000    1.337    0.850
    m01_06_ (lmb4)    1.030    0.042   24.518    0.000    1.412    0.844
    m01_09_ (lmb5)   -1.048    0.049  -21.269    0.000   -1.436   -0.750
  d_meaning_1 =~                                                        
    mnng_t2           1.000                               0.604    0.604

Regressions:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  meaning_t2 ~                                                          
    meaning_t1        1.000                               0.991    0.991

Covariances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  meaning_t1 ~~                                                         
    d_mnn_1 (fb_a)   -0.326    0.101   -3.229    0.001   -0.290   -0.290
 .ml01_01_t1 ~~                                                         
   .m01_01_ (cov1)    0.315    0.093    3.372    0.001    0.315    0.413
 .ml01_04_t1 ~~                                                         
   .m01_04_ (cov2)   -0.000    0.065   -0.003    0.998   -0.000   -0.000
 .ml01_05_t1 ~~                                                         
   .m01_05_ (cov3)    0.064    0.083    0.771    0.441    0.064    0.094
 .ml01_06_t1 ~~                                                         
   .m01_06_ (cov4)    0.115    0.090    1.287    0.198    0.115    0.144
 .ml01_09_t1 ~~                                                         
   .m01_09_ (cov5)    0.485    0.149    3.257    0.001    0.485    0.302

Intercepts:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .mnng_t2           0.000                               0.000    0.000
    d_mnn_1 (d_n_)    0.091    0.078    1.173    0.241    0.110    0.110
    mnng_t1 (wb__)    4.502    0.122   36.840    0.000    3.316    3.316
   .m01_01_           0.000                               0.000    0.000
   .m01_04_  (m2a)   -0.544    0.203   -2.680    0.007   -0.544   -0.329
   .m01_05_  (m3a)    0.372    0.203    1.829    0.067    0.372    0.238
   .m01_06_  (m4a)   -0.391    0.224   -1.742    0.081   -0.391   -0.235
   .m01_09_  (m5a)    8.263    0.243   33.941    0.000    8.263    4.336
   .m01_01_           0.000                               0.000    0.000
   .m01_04_  (m2a)   -0.544    0.203   -2.680    0.007   -0.544   -0.327
   .m01_05_  (m3a)    0.372    0.203    1.829    0.067    0.372    0.236
   .m01_06_  (m4a)   -0.391    0.224   -1.742    0.081   -0.391   -0.234
   .m01_09_  (m5a)    8.263    0.243   33.941    0.000    8.263    4.315

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .mnng_t2           0.000                               0.000    0.000
    d_mnn_1 (d_v_)    0.684    0.097    7.056    0.000    1.000    1.000
    mnng_t1 (wb__)    1.843    0.204    9.026    0.000    1.000    1.000
   .m01_01_ (res1)    0.763    0.095    8.030    0.000    0.763    0.293
   .m01_04_ (res2)    0.689    0.127    5.436    0.000    0.689    0.251
   .m01_05_ (res3)    0.684    0.092    7.419    0.000    0.684    0.280
   .m01_06_ (res4)    0.803    0.094    8.582    0.000    0.803    0.291
   .m01_09_ (res5)    1.606    0.209    7.670    0.000    1.606    0.442
   .m01_01_ (res1)    0.763    0.095    8.030    0.000    0.763    0.289
   .m01_04_ (res2)    0.689    0.127    5.436    0.000    0.689    0.248
   .m01_05_ (res3)    0.684    0.092    7.419    0.000    0.684    0.277
   .m01_06_ (res4)    0.803    0.094    8.582    0.000    0.803    0.287
   .m01_09_ (res5)    1.606    0.209    7.670    0.000    1.606    0.438


Group 2 [Group 2]:

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  meaning_t1 =~                                                         
    m01_01_           1.000                               1.378    0.819
    m01_04_ (lmb2)    1.055    0.039   26.764    0.000    1.453    0.886
    m01_05_ (lmb3)    0.976    0.039   25.338    0.000    1.345    0.826
    m01_06_ (lmb4)    1.030    0.042   24.518    0.000    1.419    0.843
    m01_09_ (lmb5)   -1.048    0.049  -21.269    0.000   -1.444   -0.733
  meaning_t2 =~                                                         
    m01_01_           1.000                               1.349    0.813
    m01_04_ (lmb2)    1.055    0.039   26.764    0.000    1.423    0.882
    m01_05_ (lmb3)    0.976    0.039   25.338    0.000    1.317    0.821
    m01_06_ (lmb4)    1.030    0.042   24.518    0.000    1.390    0.838
    m01_09_ (lmb5)   -1.048    0.049  -21.269    0.000   -1.414   -0.726
  d_meaning_1 =~                                                        
    mnng_t2           1.000                               0.627    0.627

Regressions:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  meaning_t2 ~                                                          
    meaning_t1        1.000                               1.021    1.021

Covariances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  meaning_t1 ~~                                                         
    d_mnn_1 (fb_b)   -0.396    0.129   -3.075    0.002   -0.340   -0.340
 .ml01_01_t1 ~~                                                         
   .m01_01_ (cv1b)    0.236    0.085    2.764    0.006    0.236    0.253
 .ml01_04_t1 ~~                                                         
   .m01_04_ (cv2b)    0.003    0.091    0.034    0.973    0.003    0.005
 .ml01_05_t1 ~~                                                         
   .m01_05_ (cv3b)    0.343    0.106    3.236    0.001    0.343    0.408
 .ml01_06_t1 ~~                                                         
   .m01_06_ (cv4b)    0.179    0.084    2.126    0.033    0.179    0.218
 .ml01_09_t1 ~~                                                         
   .m01_09_ (cv5b)    0.405    0.214    1.895    0.058    0.405    0.226

Intercepts:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .mnng_t2           0.000                               0.000    0.000
    d_mnn_1 (d_n_)    0.079    0.080    0.987    0.324    0.093    0.093
    mnng_t1 (wb__)    4.152    0.120   34.644    0.000    3.014    3.014
   .m01_01_           0.000                               0.000    0.000
   .m01_04_  (m2b)   -0.510    0.182   -2.808    0.005   -0.510   -0.311
   .m01_05_  (m3b)    0.536    0.198    2.702    0.007    0.536    0.329
   .m01_06_  (m4b)   -0.192    0.196   -0.978    0.328   -0.192   -0.114
   .m01_09_  (m5b)    8.153    0.226   36.075    0.000    8.153    4.139
   .m01_01_           0.000                               0.000    0.000
   .m01_04_  (m2b)   -0.510    0.182   -2.808    0.005   -0.510   -0.316
   .m01_05_  (m3b)    0.536    0.198    2.702    0.007    0.536    0.334
   .m01_06_  (m4b)   -0.192    0.196   -0.978    0.328   -0.192   -0.116
   .m01_09_  (m5b)    8.153    0.226   36.075    0.000    8.153    4.185

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .mnng_t2           0.000                               0.000    0.000
    d_mnn_1 (d_v_)    0.715    0.138    5.167    0.000    1.000    1.000
    mnng_t1 (wb__)    1.898    0.190    9.989    0.000    1.000    1.000
   .m01_01_ (rs1b)    0.931    0.104    8.961    0.000    0.931    0.329
   .m01_04_ (rs2b)    0.577    0.096    6.003    0.000    0.577    0.215
   .m01_05_ (rs3b)    0.840    0.110    7.620    0.000    0.840    0.317
   .m01_06_ (rs4b)    0.819    0.112    7.309    0.000    0.819    0.289
   .m01_09_ (rs5b)    1.795    0.237    7.558    0.000    1.795    0.463
   .m01_01_ (rs1b)    0.931    0.104    8.961    0.000    0.931    0.338
   .m01_04_ (rs2b)    0.577    0.096    6.003    0.000    0.577    0.222
   .m01_05_ (rs3b)    0.840    0.110    7.620    0.000    0.840    0.326
   .m01_06_ (rs4b)    0.819    0.112    7.309    0.000    0.819    0.298
   .m01_09_ (rs5b)    1.795    0.237    7.558    0.000    1.795    0.473
7.1.1.2.1 Search for Meaning

Search for Meaning (see preregistration deviation): fitting multi-group models

Show the code
# adapt latent change score model from above and add grouping factor in estimation (also add vectorized equality constraints to the model)

# configural invariance
mi_lcs_search_group_config <- '
search_t1 =~ 1*ml01_02_t1 + c("lamb2a", "lamb2b")*ml01_03_t1 + c("lamb3a", "lamb3b")*ml01_07_t1 + c("lamb4a", "lamb4b")*ml01_08_t1 + c("lamb5a", "lamb5b")*ml01_10_t1 # This specifies the measurement model for search_t1 
search_t2 =~ 1*ml01_02_t2 + c("lamb2a", "lamb2b")*ml01_03_t2 + c("lamb3a", "lamb3b")*ml01_07_t2 + c("lamb4a", "lamb4b")*ml01_08_t2 + c("lamb5a", "lamb5b")*ml01_10_t2 # This specifies the measurement model for search_t2 with the equality constrained factor loadings

search_t2 ~ 1*search_t1     # This parameter regresses search_t2 perfectly on search_t1
d_search_1 =~ 1*search_t2   # This defines the latent change score factor as measured perfectly by scores on search_t2
search_t2 ~ 0*1           # This line constrains the intercept of search_t2 to 0
search_t2 ~~ 0*search_t2    # This fixes the variance of search_t2 to 0

d_search_1 ~ c("d_int_a", "d_int_b")*1           # This estimates the intercept of the change score 
search_t1 ~ c("wb_int_a", "wb_int_b")*1            # This estimates the intercept of search_t1 
d_search_1 ~~ c("d_var_a", "d_var_b")*d_search_1   # This estimates the variance of the change scores 
search_t1 ~~ c("wb_var_a", "wb_var_b")*search_t1     # This estimates the variance of the search_t1 
d_search_1 ~~ c("fb_a", "fb_b")*search_t1     # This estimates the self-feedback parameter, as a covariance! -> therefore, the interpretation of the change score

ml01_02_t1 ~~ c("cov1a", "cov1b")*ml01_02_t2   # This allows residual covariance on indicator X1 across T1 and T2
ml01_03_t1 ~~ c("cov2a", "cov2b")*ml01_03_t2   # This allows residual covariance on indicator X2 across T1 and T2
ml01_07_t1 ~~ c("cov3a", "cov3b")*ml01_07_t2   # This allows residual covariance on indicator X3 across T1 and T2
ml01_08_t1 ~~ c("cov4a", "cov4b")*ml01_08_t2   # This allows residual covariance on indicator X4 across T1 and T2
ml01_10_t1 ~~ c("cov5a", "cov5b")*ml01_10_t2   # This allows residual covariance on indicator X5 across T1 and T2

ml01_02_t1 ~~ c("res1a", "res1b")*ml01_02_t1   # This allows residual variance on indicator X1 at T1 
ml01_03_t1 ~~ c("res2a", "res2b")*ml01_03_t1   # This allows residual variance on indicator X2 at T1
ml01_07_t1 ~~ c("res3a", "res3b")*ml01_07_t1   # This allows residual variance on indicator X3 at T1
ml01_08_t1 ~~ c("res4a", "res4b")*ml01_08_t1   # This allows residual variance on indicator X4 at T1
ml01_10_t1 ~~ c("res5a", "res5b")*ml01_10_t1   # This allows residual variance on indicator X5 at T1

ml01_02_t2 ~~ c("res1a", "res1b")*ml01_02_t2  # This allows residual variance on indicator X1 at T2 
ml01_03_t2 ~~ c("res2a", "res2b")*ml01_03_t2  # This allows residual variance on indicator X2 at T2 
ml01_07_t2 ~~ c("res3a", "res3b")*ml01_07_t2  # This allows residual variance on indicator X3 at T2
ml01_08_t2 ~~ c("res4a", "res4b")*ml01_08_t2  # This allows residual variance on indicator X4 at T2
ml01_10_t2 ~~ c("res5a", "res5b")*ml01_10_t2  # This allows residual variance on indicator X5 at T2

ml01_02_t1 ~ 0*1      # This constrains the intercept of X1 to 0 at T1
ml01_03_t1 ~ c("m2a", "m2b")*1     # This estimates the intercept of X2 at T1
ml01_07_t1 ~ c("m3a", "m3b")*1     # This estimates the intercept of X3 at T1
ml01_08_t1 ~ c("m4a", "m4b")*1     # This estimates the intercept of X4 at T1
ml01_10_t1 ~ c("m5a", "m5b")*1     # This estimates the intercept of X5 at T1

ml01_02_t2 ~ 0*1      # This constrains the intercept of X1 to 0 at T2
ml01_03_t2 ~ c("m2a", "m2b")*1     # This estimates the intercept of X2 at T2
ml01_07_t2 ~ c("m3a", "m3b")*1     # This estimates the intercept of X3 at T2
ml01_08_t2 ~ c("m4a", "m4b")*1     # This estimates the intercept of X4 at T2
ml01_10_t2 ~ c("m5a", "m5b")*1     # This estimates the intercept of X5 at T2
'
lcs_search_group_12_config <- sem(mi_lcs_search_group_config, data=df_sbsa3_wide_wb %>% filter(group!="Group 3"), estimator='mlr', 
                                fixed.x=FALSE, missing='fiml', group = "group")

# weak invariance
mi_lcs_search_group_weak <- '
search_t1 =~ 1*ml01_02_t1 + c("lamb2", "lamb2")*ml01_03_t1 + c("lamb3", "lamb3")*ml01_07_t1 + c("lamb4", "lamb4")*ml01_08_t1 + c("lamb5", "lamb5")*ml01_10_t1 # This specifies the measurement model for search_t1 
search_t2 =~ 1*ml01_02_t2 + c("lamb2", "lamb2")*ml01_03_t2 + c("lamb3", "lamb3")*ml01_07_t2 + c("lamb4", "lamb4")*ml01_08_t2 + c("lamb5", "lamb5")*ml01_10_t2 # This specifies the measurement model for search_t2 with the equality constrained factor loadings

search_t2 ~ 1*search_t1     # This parameter regresses search_t2 perfectly on search_t1
d_search_1 =~ 1*search_t2   # This defines the latent change score factor as measured perfectly by scores on search_t2
search_t2 ~ 0*1           # This line constrains the intercept of search_t2 to 0
search_t2 ~~ 0*search_t2    # This fixes the variance of search_t2 to 0

d_search_1 ~ c("d_int_a", "d_int_b")*1           # This estimates the intercept of the change score 
search_t1 ~ c("wb_int_a", "wb_int_b")*1            # This estimates the intercept of search_t1 
d_search_1 ~~ c("d_var_a", "d_var_b")*d_search_1   # This estimates the variance of the change scores 
search_t1 ~~ c("wb_var_a", "wb_var_b")*search_t1     # This estimates the variance of the search_t1 
d_search_1 ~~ c("fb_a", "fb_b")*search_t1     # This estimates the self-feedback parameter, as a covariance! -> therefore, the interpretation of the change score

ml01_02_t1 ~~ c("cov1a", "cov1b")*ml01_02_t2   # This allows residual covariance on indicator X1 across T1 and T2
ml01_03_t1 ~~ c("cov2a", "cov2b")*ml01_03_t2   # This allows residual covariance on indicator X2 across T1 and T2
ml01_07_t1 ~~ c("cov3a", "cov3b")*ml01_07_t2   # This allows residual covariance on indicator X3 across T1 and T2
ml01_08_t1 ~~ c("cov4a", "cov4b")*ml01_08_t2   # This allows residual covariance on indicator X4 across T1 and T2
ml01_10_t1 ~~ c("cov5a", "cov5b")*ml01_10_t2   # This allows residual covariance on indicator X5 across T1 and T2

ml01_02_t1 ~~ c("res1a", "res1b")*ml01_02_t1   # This allows residual variance on indicator X1 at T1 
ml01_03_t1 ~~ c("res2a", "res2b")*ml01_03_t1   # This allows residual variance on indicator X2 at T1
ml01_07_t1 ~~ c("res3a", "res3b")*ml01_07_t1   # This allows residual variance on indicator X3 at T1
ml01_08_t1 ~~ c("res4a", "res4b")*ml01_08_t1   # This allows residual variance on indicator X4 at T1
ml01_10_t1 ~~ c("res5a", "res5b")*ml01_10_t1   # This allows residual variance on indicator X5 at T1

ml01_02_t2 ~~ c("res1a", "res1b")*ml01_02_t2  # This allows residual variance on indicator X1 at T2 
ml01_03_t2 ~~ c("res2a", "res2b")*ml01_03_t2  # This allows residual variance on indicator X2 at T2 
ml01_07_t2 ~~ c("res3a", "res3b")*ml01_07_t2  # This allows residual variance on indicator X3 at T2
ml01_08_t2 ~~ c("res4a", "res4b")*ml01_08_t2  # This allows residual variance on indicator X4 at T2
ml01_10_t2 ~~ c("res5a", "res5b")*ml01_10_t2  # This allows residual variance on indicator X5 at T2

ml01_02_t1 ~ 0*1      # This constrains the intercept of X1 to 0 at T1
ml01_03_t1 ~ c("m2a", "m2b")*1     # This estimates the intercept of X2 at T1
ml01_07_t1 ~ c("m3a", "m3b")*1     # This estimates the intercept of X3 at T1
ml01_08_t1 ~ c("m4a", "m4b")*1     # This estimates the intercept of X4 at T1
ml01_10_t1 ~ c("m5a", "m5b")*1     # This estimates the intercept of X5 at T1

ml01_02_t2 ~ 0*1      # This constrains the intercept of X1 to 0 at T2
ml01_03_t2 ~ c("m2a", "m2b")*1     # This estimates the intercept of X2 at T2
ml01_07_t2 ~ c("m3a", "m3b")*1     # This estimates the intercept of X3 at T2
ml01_08_t2 ~ c("m4a", "m4b")*1     # This estimates the intercept of X4 at T2
ml01_10_t2 ~ c("m5a", "m5b")*1     # This estimates the intercept of X5 at T2
'

lcs_search_group_12_weak <- sem(mi_lcs_search_group_weak, data=df_sbsa3_wide_wb %>% filter(group!="Group 3"), estimator='mlr', fixed.x=FALSE, missing='fiml', 
                           group = "group", group.equal = "loadings")

# strong invariance
mi_lcs_search_group_strong <- '
search_t1 =~ 1*ml01_02_t1 + c("lamb2", "lamb2")*ml01_03_t1 + c("lamb3", "lamb3")*ml01_07_t1 + c("lamb4", "lamb4")*ml01_08_t1 + c("lamb5", "lamb5")*ml01_10_t1 # This specifies the measurement model for search_t1 
search_t2 =~ 1*ml01_02_t2 + c("lamb2", "lamb2")*ml01_03_t2 + c("lamb3", "lamb3")*ml01_07_t2 + c("lamb4", "lamb4")*ml01_08_t2 + c("lamb5", "lamb5")*ml01_10_t2 # This specifies the measurement model for search_t2 with the equality constrained factor loadings

search_t2 ~ 1*search_t1     # This parameter regresses search_t2 perfectly on search_t1
d_search_1 =~ 1*search_t2   # This defines the latent change score factor as measured perfectly by scores on search_t2
search_t2 ~ 0*1           # This line constrains the intercept of search_t2 to 0
search_t2 ~~ 0*search_t2    # This fixes the variance of search_t2 to 0

d_search_1 ~ c("d_int", "d_int")*1           # This estimates the intercept of the change score 
search_t1 ~ c("wb_int", "wb_int")*1            # This estimates the intercept of search_t1 
d_search_1 ~~ c("d_var_a", "d_var_b")*d_search_1   # This estimates the variance of the change scores 
search_t1 ~~ c("wb_var_a", "wb_var_b")*search_t1     # This estimates the variance of the search_t1 
d_search_1 ~~ c("fb_a", "fb_b")*search_t1     # This estimates the self-feedback parameter, as a covariance! -> therefore, the interpretation of the change score

ml01_02_t1 ~~ c("cov1a", "cov1b")*ml01_02_t2   # This allows residual covariance on indicator X1 across T1 and T2
ml01_03_t1 ~~ c("cov2a", "cov2b")*ml01_03_t2   # This allows residual covariance on indicator X2 across T1 and T2
ml01_07_t1 ~~ c("cov3a", "cov3b")*ml01_07_t2   # This allows residual covariance on indicator X3 across T1 and T2
ml01_08_t1 ~~ c("cov4a", "cov4b")*ml01_08_t2   # This allows residual covariance on indicator X4 across T1 and T2
ml01_10_t1 ~~ c("cov5a", "cov5b")*ml01_10_t2   # This allows residual covariance on indicator X5 across T1 and T2

ml01_02_t1 ~~ c("res1a", "res1b")*ml01_02_t1   # This allows residual variance on indicator X1 at T1 
ml01_03_t1 ~~ c("res2a", "res2b")*ml01_03_t1   # This allows residual variance on indicator X2 at T1
ml01_07_t1 ~~ c("res3a", "res3b")*ml01_07_t1   # This allows residual variance on indicator X3 at T1
ml01_08_t1 ~~ c("res4a", "res4b")*ml01_08_t1   # This allows residual variance on indicator X4 at T1
ml01_10_t1 ~~ c("res5a", "res5b")*ml01_10_t1   # This allows residual variance on indicator X5 at T1

ml01_02_t2 ~~ c("res1a", "res1b")*ml01_02_t2  # This allows residual variance on indicator X1 at T2 
ml01_03_t2 ~~ c("res2a", "res2b")*ml01_03_t2  # This allows residual variance on indicator X2 at T2 
ml01_07_t2 ~~ c("res3a", "res3b")*ml01_07_t2  # This allows residual variance on indicator X3 at T2
ml01_08_t2 ~~ c("res4a", "res4b")*ml01_08_t2  # This allows residual variance on indicator X4 at T2
ml01_10_t2 ~~ c("res5a", "res5b")*ml01_10_t2  # This allows residual variance on indicator X5 at T2

ml01_02_t1 ~ 0*1      # This constrains the intercept of X1 to 0 at T1
ml01_03_t1 ~ c("m2", "m2")*1     # This estimates the intercept of X2 at T1
ml01_07_t1 ~ c("m3", "m3")*1     # This estimates the intercept of X3 at T1
ml01_08_t1 ~ c("m4", "m4")*1     # This estimates the intercept of X4 at T1
ml01_10_t1 ~ c("m5", "m5")*1     # This estimates the intercept of X5 at T1

ml01_02_t2 ~ 0*1      # This constrains the intercept of X1 to 0 at T2
ml01_03_t2 ~ c("m2", "m2")*1     # This estimates the intercept of X2 at T2
ml01_07_t2 ~ c("m3", "m3")*1     # This estimates the intercept of X3 at T2
ml01_08_t2 ~ c("m4", "m4")*1     # This estimates the intercept of X4 at T2
ml01_10_t2 ~ c("m5", "m5")*1     # This estimates the intercept of X5 at T2
'

lcs_search_group_12_strong <- sem(mi_lcs_search_group_strong, data=df_sbsa3_wide_wb %>% filter(group!="Group 3"), estimator='mlr', fixed.x=FALSE, missing='fiml', 
                             group = "group", group.equal = c("intercepts", "loadings"))

Meaning in life: results

# model comparison tests for measurement invariance
lavTestLRT(lcs_search_group_12_config, lcs_search_group_12_weak, lcs_search_group_12_strong)

Scaled Chi-Squared Difference Test (method = "satorra.bentler.2001")

lavaan NOTE:
    The "Chisq" column contains standard test statistics, not the
    robust test that should be reported per model. A robust difference
    test is a function of two standard (not robust) statistics.
 
                           Df   AIC   BIC  Chisq Chisq diff Df diff Pr(>Chisq)
lcs_search_group_12_config 84 10166 10344 222.52                              
lcs_search_group_12_weak   88 10175 10337 239.46    14.4057       4   0.006107
lcs_search_group_12_strong 94 10164 10304 241.12     1.6738       6   0.947126
                             
lcs_search_group_12_config   
lcs_search_group_12_weak   **
lcs_search_group_12_strong   
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# show model with varying latent change parameters 
# -> key parameter is "d_search_1 ~1"
# labelled parameter as "d_int_a" & "d_int_b" (a = Group 1, b = Group 2)
params_lcs_search_group_12_weak <- broom::tidy(lcs_search_group_12_weak, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_wb_st3 %>% filter(trait=="search" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_search_1 ~1 ")) %>% mutate(sig_diff = " ")
kable(params_lcs_search_group_12_weak, digits = 3) 
term label estimate beta std.all statistic p.value sig_diff
d_search_1 ~1 d_int_a -0.144 -0.101 -0.164 -1.788 0.074
d_search_1 ~1 d_int_b -0.147 -0.103 -0.164 -1.804 0.071
# constrained to be equal in the strong measurement invariance model:
kable(broom::tidy(lcs_search_group_12_strong, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_wb_st3 %>% filter(trait=="search" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_search_1 ~1 ")), digits = 3) 
term label estimate beta std.all statistic p.value
d_search_1 ~1 d_int -0.146 -0.103 -0.166 -2.527 0.012
d_search_1 ~1 d_int -0.146 -0.103 -0.164 -2.527 0.012

Very similar degree of positive change in search for meaning in life in both groups and no substantial differences (in means) according to the LRTs.

# whole model (weak invariance)
summary(lcs_search_group_12_weak, fit.measures=TRUE, standardized=TRUE, rsquare=F)
lavaan 0.6.15 ended normally after 82 iterations

  Estimator                                         ML
  Optimization method                           NLMINB
  Number of model parameters                        72
  Number of equality constraints                    30

  Number of observations per group:                   
    Group 1                                        175
    Group 2                                        179
  Number of missing patterns per group:               
    Group 1                                          3
    Group 2                                          2

Model Test User Model:
                                              Standard      Scaled
  Test Statistic                               239.462     185.153
  Degrees of freedom                                88          88
  P-value (Chi-square)                           0.000       0.000
  Scaling correction factor                                  1.293
    Yuan-Bentler correction (Mplus variant)                       
  Test statistic for each group:
    Group 1                                     87.695      67.806
    Group 2                                    151.767     117.347

Model Test Baseline Model:

  Test statistic                              2802.551    1727.272
  Degrees of freedom                                90          90
  P-value                                        0.000       0.000
  Scaling correction factor                                  1.623

User Model versus Baseline Model:

  Comparative Fit Index (CFI)                    0.944       0.941
  Tucker-Lewis Index (TLI)                       0.943       0.939
                                                                  
  Robust Comparative Fit Index (CFI)                         0.954
  Robust Tucker-Lewis Index (TLI)                            0.953

Loglikelihood and Information Criteria:

  Loglikelihood user model (H0)              -5045.375   -5045.375
  Scaling correction factor                                  0.975
      for the MLR correction                                      
  Loglikelihood unrestricted model (H1)      -4925.644   -4925.644
  Scaling correction factor                                  1.416
      for the MLR correction                                      
                                                                  
  Akaike (AIC)                               10174.749   10174.749
  Bayesian (BIC)                             10337.260   10337.260
  Sample-size adjusted Bayesian (SABIC)      10204.018   10204.018

Root Mean Square Error of Approximation:

  RMSEA                                          0.099       0.079
  90 Percent confidence interval - lower         0.084       0.065
  90 Percent confidence interval - upper         0.114       0.093
  P-value H_0: RMSEA <= 0.050                    0.000       0.001
  P-value H_0: RMSEA >= 0.080                    0.979       0.465
                                                                  
  Robust RMSEA                                               0.092
  90 Percent confidence interval - lower                     0.072
  90 Percent confidence interval - upper                     0.112
  P-value H_0: Robust RMSEA <= 0.050                         0.001
  P-value H_0: Robust RMSEA >= 0.080                         0.842

Standardized Root Mean Square Residual:

  SRMR                                           0.059       0.059

Parameter Estimates:

  Standard errors                             Sandwich
  Information bread                           Observed
  Observed information based on                Hessian


Group 1 [Group 1]:

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  search_t1 =~                                                          
    m01_02_           1.000                               1.258    0.794
    m01_03_ (lmb2)    1.098    0.048   22.660    0.000    1.381    0.842
    m01_07_ (lmb3)    1.095    0.058   18.739    0.000    1.377    0.828
    m01_08_ (lmb4)    1.162    0.071   16.380    0.000    1.461    0.874
    m01_10_ (lmb5)    1.145    0.067   17.153    0.000    1.441    0.812
  search_t2 =~                                                          
    m01_02_           1.000                               1.281    0.799
    m01_03_ (lmb2)    1.098    0.048   22.660    0.000    1.406    0.846
    m01_07_ (lmb3)    1.095    0.058   18.739    0.000    1.402    0.833
    m01_08_ (lmb4)    1.162    0.071   16.380    0.000    1.488    0.877
    m01_10_ (lmb5)    1.145    0.067   17.153    0.000    1.467    0.817
  d_search_1 =~                                                         
    srch_t2           1.000                               0.688    0.688

Regressions:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  search_t2 ~                                                           
    search_t1         1.000                               0.982    0.982

Covariances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  search_t1 ~~                                                          
    d_src_1 (fb_a)   -0.359    0.117   -3.084    0.002   -0.324   -0.324
 .ml01_02_t1 ~~                                                         
   .m01_02_ (cov1)    0.079    0.120    0.659    0.510    0.079    0.085
 .ml01_03_t1 ~~                                                         
   .m01_03_ (cov2)    0.149    0.084    1.770    0.077    0.149    0.189
 .ml01_07_t1 ~~                                                         
   .m01_07_ (cov3)    0.224    0.089    2.516    0.012    0.224    0.257
 .ml01_08_t1 ~~                                                         
   .m01_08_ (cov4)    0.157    0.145    1.084    0.278    0.157    0.237
 .ml01_10_t1 ~~                                                         
   .m01_10_ (cov5)    0.196    0.168    1.165    0.244    0.196    0.183

Intercepts:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .srch_t2           0.000                               0.000    0.000
    d_src_1 (d_n_)   -0.144    0.081   -1.788    0.074   -0.164   -0.164
    srch_t1 (wb__)    5.158    0.116   44.347    0.000    4.100    4.100
   .m01_02_           0.000                               0.000    0.000
   .m01_03_  (m2a)   -0.631    0.279   -2.263    0.024   -0.631   -0.384
   .m01_07_  (m3a)   -0.767    0.327   -2.344    0.019   -0.767   -0.461
   .m01_08_  (m4a)   -1.118    0.402   -2.779    0.005   -1.118   -0.668
   .m01_10_  (m5a)   -1.283    0.367   -3.491    0.000   -1.283   -0.723
   .m01_02_           0.000                               0.000    0.000
   .m01_03_  (m2a)   -0.631    0.279   -2.263    0.024   -0.631   -0.379
   .m01_07_  (m3a)   -0.767    0.327   -2.344    0.019   -0.767   -0.455
   .m01_08_  (m4a)   -1.118    0.402   -2.779    0.005   -1.118   -0.659
   .m01_10_  (m5a)   -1.283    0.367   -3.491    0.000   -1.283   -0.715

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .srch_t2           0.000                               0.000    0.000
    d_src_1 (d_v_)    0.777    0.161    4.814    0.000    1.000    1.000
    srch_t1 (wb__)    1.582    0.266    5.960    0.000    1.000    1.000
   .m01_02_ (res1)    0.930    0.129    7.190    0.000    0.930    0.370
   .m01_03_ (res2)    0.785    0.136    5.793    0.000    0.785    0.292
   .m01_07_ (res3)    0.870    0.117    7.431    0.000    0.870    0.314
   .m01_08_ (res4)    0.662    0.143    4.619    0.000    0.662    0.237
   .m01_10_ (res5)    1.070    0.187    5.731    0.000    1.070    0.340
   .m01_02_ (res1)    0.930    0.129    7.190    0.000    0.930    0.362
   .m01_03_ (res2)    0.785    0.136    5.793    0.000    0.785    0.284
   .m01_07_ (res3)    0.870    0.117    7.431    0.000    0.870    0.307
   .m01_08_ (res4)    0.662    0.143    4.619    0.000    0.662    0.230
   .m01_10_ (res5)    1.070    0.187    5.731    0.000    1.070    0.332


Group 2 [Group 2]:

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  search_t1 =~                                                          
    m01_02_           1.000                               1.191    0.737
    m01_03_ (lmb2)    1.098    0.048   22.660    0.000    1.308    0.823
    m01_07_ (lmb3)    1.095    0.058   18.739    0.000    1.303    0.849
    m01_08_ (lmb4)    1.162    0.071   16.380    0.000    1.383    0.897
    m01_10_ (lmb5)    1.145    0.067   17.153    0.000    1.364    0.806
  search_t2 =~                                                          
    m01_02_           1.000                               1.245    0.751
    m01_03_ (lmb2)    1.098    0.048   22.660    0.000    1.367    0.834
    m01_07_ (lmb3)    1.095    0.058   18.739    0.000    1.363    0.859
    m01_08_ (lmb4)    1.162    0.071   16.380    0.000    1.447    0.905
    m01_10_ (lmb5)    1.145    0.067   17.153    0.000    1.426    0.818
  d_search_1 =~                                                         
    srch_t2           1.000                               0.716    0.716

Regressions:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  search_t2 ~                                                           
    search_t1         1.000                               0.956    0.956

Covariances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  search_t1 ~~                                                          
    d_src_1 (fb_b)   -0.331    0.132   -2.502    0.012   -0.312   -0.312
 .ml01_02_t1 ~~                                                         
   .m01_02_ (cv1b)    0.452    0.173    2.604    0.009    0.452    0.378
 .ml01_03_t1 ~~                                                         
   .m01_03_ (cv2b)    0.321    0.145    2.205    0.027    0.321    0.393
 .ml01_07_t1 ~~                                                         
   .m01_07_ (cv3b)    0.137    0.123    1.112    0.266    0.137    0.208
 .ml01_08_t1 ~~                                                         
   .m01_08_ (cv4b)    0.085    0.077    1.112    0.266    0.085    0.184
 .ml01_10_t1 ~~                                                         
   .m01_10_ (cv5b)    0.228    0.124    1.831    0.067    0.228    0.227

Intercepts:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .srch_t2           0.000                               0.000    0.000
    d_src_1 (d_n_)   -0.147    0.081   -1.804    0.071   -0.164   -0.164
    srch_t1 (wb__)    5.287    0.105   50.312    0.000    4.440    4.440
   .m01_02_           0.000                               0.000    0.000
   .m01_03_  (m2b)   -0.764    0.277   -2.757    0.006   -0.764   -0.481
   .m01_07_  (m3b)   -0.841    0.340   -2.472    0.013   -0.841   -0.548
   .m01_08_  (m4b)   -1.217    0.412   -2.957    0.003   -1.217   -0.789
   .m01_10_  (m5b)   -1.321    0.387   -3.413    0.001   -1.321   -0.781
   .m01_02_           0.000                               0.000    0.000
   .m01_03_  (m2b)   -0.764    0.277   -2.757    0.006   -0.764   -0.466
   .m01_07_  (m3b)   -0.841    0.340   -2.472    0.013   -0.841   -0.530
   .m01_08_  (m4b)   -1.217    0.412   -2.957    0.003   -1.217   -0.761
   .m01_10_  (m5b)   -1.321    0.387   -3.413    0.001   -1.321   -0.758

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .srch_t2           0.000                               0.000    0.000
    d_src_1 (d_v_)    0.795    0.187    4.253    0.000    1.000    1.000
    srch_t1 (wb__)    1.418    0.221    6.421    0.000    1.000    1.000
   .m01_02_ (rs1b)    1.196    0.171    6.989    0.000    1.196    0.458
   .m01_03_ (rs2b)    0.816    0.142    5.735    0.000    0.816    0.323
   .m01_07_ (rs3b)    0.660    0.120    5.488    0.000    0.660    0.280
   .m01_08_ (rs4b)    0.465    0.081    5.762    0.000    0.465    0.195
   .m01_10_ (rs5b)    1.005    0.137    7.333    0.000    1.005    0.351
   .m01_02_ (rs1b)    1.196    0.171    6.989    0.000    1.196    0.436
   .m01_03_ (rs2b)    0.816    0.142    5.735    0.000    0.816    0.304
   .m01_07_ (rs3b)    0.660    0.120    5.488    0.000    0.660    0.262
   .m01_08_ (rs4b)    0.465    0.081    5.762    0.000    0.465    0.182
   .m01_10_ (rs5b)    1.005    0.137    7.333    0.000    1.005    0.331
7.1.1.3 Self-esteem

Self-esteem: fitting multi-group models

Show the code
# adapt latent change score model from above and add grouping factor in estimation (also add vectorized equality constraints to the model)

# configural invariance
mi_lcs_selfes_group_config <- '
selfes_t1 =~ 1*selfes_par1_t1 + c("lamb2a", "lamb2b")*selfes_par2_t1 + c("lamb3a", "lamb3b")*selfes_par3_t1 # This specifies the measurement model for selfes_t1 
selfes_t2 =~ 1*selfes_par1_t2 + c("lamb2a", "lamb2b")*selfes_par2_t2 + c("lamb3a", "lamb3b")*selfes_par3_t2 # This specifies the measurement model for selfes_t2 with the equality constrained factor loadings

selfes_t2 ~ 1*selfes_t1     # This parameter regresses selfes_t2 perfectly on selfes_t1
d_selfes_1 =~ 1*selfes_t2   # This defines the latent change score factor as measured perfectly by scores on selfes_t2
selfes_t2 ~ 0*1           # This line constrains the intercept of selfes_t2 to 0
selfes_t2 ~~ 0*selfes_t2    # This fixes the variance of selfes_t2 to 0

d_selfes_1 ~ c("d_int_a", "d_int_b")*1           # This estimates the intercept of the change score 
selfes_t1 ~ c("wb_int_a", "wb_int_b")*1            # This estimates the intercept of selfes_t1 
d_selfes_1 ~~ c("d_var_a", "d_var_b")*d_selfes_1   # This estimates the variance of the change scores 
selfes_t1 ~~ c("wb_var_a", "wb_var_b")*selfes_t1     # This estimates the variance of the selfes_t1 
d_selfes_1 ~~ c("fb_a", "fb_b")*selfes_t1     # This estimates the self-feedback parameter, as a covariance! -> therefore, the interpretation of the change score

selfes_par1_t1 ~~ c("cov1a", "cov1b")*selfes_par1_t2   # This allows residual covariance on indicator X1 across T1 and T2
selfes_par2_t1 ~~ c("cov2a", "cov2b")*selfes_par2_t2   # This allows residual covariance on indicator X2 across T1 and T2
selfes_par3_t1 ~~ c("cov3a", "cov3b")*selfes_par3_t2   # This allows residual covariance on indicator X3 across T1 and T2

selfes_par1_t1 ~~ c("res1a", "res1b")*selfes_par1_t1   # This allows residual variance on indicator X1 at T1 
selfes_par2_t1 ~~ c("res2a", "res2b")*selfes_par2_t1   # This allows residual variance on indicator X2 at T1
selfes_par3_t1 ~~ c("res3a", "res3b")*selfes_par3_t1   # This allows residual variance on indicator X3 at T1

selfes_par1_t2 ~~ c("res1a", "res1b")*selfes_par1_t2  # This allows residual variance on indicator X1 at T2 
selfes_par2_t2 ~~ c("res2a", "res2b")*selfes_par2_t2  # This allows residual variance on indicator X2 at T2 
selfes_par3_t2 ~~ c("res3a", "res3b")*selfes_par3_t2  # This allows residual variance on indicator X3 at T2

selfes_par1_t1 ~ 0*1      # This constrains the intercept of X1 to 0 at T1
selfes_par2_t1 ~ c("m2a", "m2b")*1     # This estimates the intercept of X2 at T1
selfes_par3_t1 ~ c("m3a", "m3b")*1     # This estimates the intercept of X3 at T1

selfes_par1_t2 ~ 0*1      # This constrains the intercept of X1 to 0 at T2
selfes_par2_t2 ~ c("m2a", "m2b")*1     # This estimates the intercept of X2 at T2
selfes_par3_t2 ~ c("m3a", "m3b")*1     # This estimates the intercept of X3 at T2
'
lcs_selfes_group_12_config <- sem(mi_lcs_selfes_group_config, data=df_sbsa3_wide_wb %>% filter(group!="Group 3"), estimator='mlr', 
                                fixed.x=FALSE, missing='fiml', group = "group")

# weak invariance
mi_lcs_selfes_group_weak <- '
selfes_t1 =~ 1*selfes_par1_t1 + c("lamb2", "lamb2")*selfes_par2_t1 + c("lamb3", "lamb3")*selfes_par3_t1 # This specifies the measurement model for selfes_t1 
selfes_t2 =~ 1*selfes_par1_t2 + c("lamb2", "lamb2")*selfes_par2_t2 + c("lamb3", "lamb3")*selfes_par3_t2 # This specifies the measurement model for selfes_t2 with the equality constrained factor loadings

selfes_t2 ~ 1*selfes_t1     # This parameter regresses selfes_t2 perfectly on selfes_t1
d_selfes_1 =~ 1*selfes_t2   # This defines the latent change score factor as measured perfectly by scores on selfes_t2
selfes_t2 ~ 0*1           # This line constrains the intercept of selfes_t2 to 0
selfes_t2 ~~ 0*selfes_t2    # This fixes the variance of selfes_t2 to 0

d_selfes_1 ~ c("d_int_a", "d_int_b")*1           # This estimates the intercept of the change score 
selfes_t1 ~ c("wb_int_a", "wb_int_b")*1            # This estimates the intercept of selfes_t1 
d_selfes_1 ~~ c("d_var_a", "d_var_b")*d_selfes_1   # This estimates the variance of the change scores 
selfes_t1 ~~ c("wb_var_a", "wb_var_b")*selfes_t1     # This estimates the variance of the selfes_t1 
d_selfes_1 ~~ c("fb_a", "fb_b")*selfes_t1     # This estimates the self-feedback parameter, as a covariance! -> therefore, the interpretation of the change score

selfes_par1_t1 ~~ c("cov1a", "cov1b")*selfes_par1_t2   # This allows residual covariance on indicator X1 across T1 and T2
selfes_par2_t1 ~~ c("cov2a", "cov2b")*selfes_par2_t2   # This allows residual covariance on indicator X2 across T1 and T2
selfes_par3_t1 ~~ c("cov3a", "cov3b")*selfes_par3_t2   # This allows residual covariance on indicator X3 across T1 and T2

selfes_par1_t1 ~~ c("res1a", "res1b")*selfes_par1_t1   # This allows residual variance on indicator X1 at T1 
selfes_par2_t1 ~~ c("res2a", "res2b")*selfes_par2_t1   # This allows residual variance on indicator X2 at T1
selfes_par3_t1 ~~ c("res3a", "res3b")*selfes_par3_t1   # This allows residual variance on indicator X3 at T1

selfes_par1_t2 ~~ c("res1a", "res1b")*selfes_par1_t2  # This allows residual variance on indicator X1 at T2 
selfes_par2_t2 ~~ c("res2a", "res2b")*selfes_par2_t2  # This allows residual variance on indicator X2 at T2 
selfes_par3_t2 ~~ c("res3a", "res3b")*selfes_par3_t2  # This allows residual variance on indicator X3 at T2

selfes_par1_t1 ~ 0*1      # This constrains the intercept of X1 to 0 at T1
selfes_par2_t1 ~ c("m2a", "m2b")*1     # This estimates the intercept of X2 at T1
selfes_par3_t1 ~ c("m3a", "m3b")*1     # This estimates the intercept of X3 at T1

selfes_par1_t2 ~ 0*1      # This constrains the intercept of X1 to 0 at T2
selfes_par2_t2 ~ c("m2a", "m2b")*1     # This estimates the intercept of X2 at T2
selfes_par3_t2 ~ c("m3a", "m3b")*1     # This estimates the intercept of X3 at T2
'

lcs_selfes_group_12_weak <- sem(mi_lcs_selfes_group_weak, data=df_sbsa3_wide_wb %>% filter(group!="Group 3"), estimator='mlr', fixed.x=FALSE, missing='fiml', 
                           group = "group", group.equal = "loadings")

# strong invariance
mi_lcs_selfes_group_strong <- '
selfes_t1 =~ 1*selfes_par1_t1 + c("lamb2", "lamb2")*selfes_par2_t1 + c("lamb3", "lamb3")*selfes_par3_t1 # This specifies the measurement model for selfes_t1 
selfes_t2 =~ 1*selfes_par1_t2 + c("lamb2", "lamb2")*selfes_par2_t2 + c("lamb3", "lamb3")*selfes_par3_t2 # This specifies the measurement model for selfes_t2 with the equality constrained factor loadings

selfes_t2 ~ 1*selfes_t1     # This parameter regresses selfes_t2 perfectly on selfes_t1
d_selfes_1 =~ 1*selfes_t2   # This defines the latent change score factor as measured perfectly by scores on selfes_t2
selfes_t2 ~ 0*1           # This line constrains the intercept of selfes_t2 to 0
selfes_t2 ~~ 0*selfes_t2    # This fixes the variance of selfes_t2 to 0

d_selfes_1 ~ c("d_int", "d_int")*1           # This estimates the intercept of the change score 
selfes_t1 ~ c("wb_int", "wb_int")*1            # This estimates the intercept of selfes_t1 
d_selfes_1 ~~ c("d_var_a", "d_var_b")*d_selfes_1   # This estimates the variance of the change scores 
selfes_t1 ~~ c("wb_var_a", "wb_var_b")*selfes_t1     # This estimates the variance of the selfes_t1 
d_selfes_1 ~~ c("fb_a", "fb_b")*selfes_t1     # This estimates the self-feedback parameter, as a covariance! -> therefore, the interpretation of the change score

selfes_par1_t1 ~~ c("cov1a", "cov1b")*selfes_par1_t2   # This allows residual covariance on indicator X1 across T1 and T2
selfes_par2_t1 ~~ c("cov2a", "cov2b")*selfes_par2_t2   # This allows residual covariance on indicator X2 across T1 and T2
selfes_par3_t1 ~~ c("cov3a", "cov3b")*selfes_par3_t2   # This allows residual covariance on indicator X3 across T1 and T2

selfes_par1_t1 ~~ c("res1a", "res1b")*selfes_par1_t1   # This allows residual variance on indicator X1 at T1 
selfes_par2_t1 ~~ c("res2a", "res2b")*selfes_par2_t1   # This allows residual variance on indicator X2 at T1
selfes_par3_t1 ~~ c("res3a", "res3b")*selfes_par3_t1   # This allows residual variance on indicator X3 at T1

selfes_par1_t2 ~~ c("res1a", "res1b")*selfes_par1_t2  # This allows residual variance on indicator X1 at T2 
selfes_par2_t2 ~~ c("res2a", "res2b")*selfes_par2_t2  # This allows residual variance on indicator X2 at T2 
selfes_par3_t2 ~~ c("res3a", "res3b")*selfes_par3_t2  # This allows residual variance on indicator X3 at T2

selfes_par1_t1 ~ 0*1      # This constrains the intercept of X1 to 0 at T1
selfes_par2_t1 ~ c("m2", "m2")*1     # This estimates the intercept of X2 at T1
selfes_par3_t1 ~ c("m3", "m3")*1     # This estimates the intercept of X3 at T1

selfes_par1_t2 ~ 0*1      # This constrains the intercept of X1 to 0 at T2
selfes_par2_t2 ~ c("m2", "m2")*1     # This estimates the intercept of X2 at T2
selfes_par3_t2 ~ c("m3", "m3")*1     # This estimates the intercept of X3 at T2
'

lcs_selfes_group_12_strong <- sem(mi_lcs_selfes_group_strong, data=df_sbsa3_wide_wb %>% filter(group!="Group 3"), estimator='mlr', fixed.x=FALSE, missing='fiml', 
                             group = "group", group.equal = c("intercepts", "loadings"))

Meaning in life: results

# model comparison tests for measurement invariance
lavTestLRT(lcs_selfes_group_12_config, lcs_selfes_group_12_weak, lcs_selfes_group_12_strong)

Scaled Chi-Squared Difference Test (method = "satorra.bentler.2001")

lavaan NOTE:
    The "Chisq" column contains standard test statistics, not the
    robust test that should be reported per model. A robust difference
    test is a function of two standard (not robust) statistics.
 
                           Df    AIC    BIC  Chisq Chisq diff Df diff
lcs_selfes_group_12_config 24 3696.0 3812.1 19.460                   
lcs_selfes_group_12_weak   26 3692.2 3800.5 19.616     0.1506       2
lcs_selfes_group_12_strong 30 3701.5 3794.3 36.898    16.7031       4
                           Pr(>Chisq)   
lcs_selfes_group_12_config              
lcs_selfes_group_12_weak     0.927464   
lcs_selfes_group_12_strong   0.002207 **
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# show model with varying latent change parameters 
# -> key parameter is "d_selfes_1 ~1"
# labelled parameter as "d_int_a" & "d_int_b" (a = Group 1, b = Group 2)
params_lcs_selfes_group_12_weak <- broom::tidy(lcs_selfes_group_12_weak, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_wb_st3 %>% filter(trait=="selfes" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_selfes_1 ~1 ")) %>% mutate(sig_diff = "**")
kable(params_lcs_selfes_group_12_weak, digits = 3) 
term label estimate beta std.all statistic p.value sig_diff
d_selfes_1 ~1 d_int_a 0.108 0.122 0.247 2.765 0.006 **
d_selfes_1 ~1 d_int_b 0.166 0.187 0.398 4.158 0.000 **
# constrained to be equal in the strong measurement invariance model:
kable(broom::tidy(lcs_selfes_group_12_strong, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_wb_st3 %>% filter(trait=="selfes" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_selfes_1 ~1 ")), digits = 3) 
term label estimate beta std.all statistic p.value
d_selfes_1 ~1 d_int 0.139 0.157 0.315 4.742 0
d_selfes_1 ~1 d_int 0.139 0.157 0.331 4.742 0

Significantly more positive change in self-esteem in Group 2 compared to Group 1.

# whole model (weak invariance)
summary(lcs_selfes_group_12_weak, fit.measures=TRUE, standardized=TRUE, rsquare=F)
lavaan 0.6.15 ended normally after 66 iterations

  Estimator                                         ML
  Optimization method                           NLMINB
  Number of model parameters                        44
  Number of equality constraints                    16

  Number of observations per group:                   
    Group 1                                        175
    Group 2                                        179
  Number of missing patterns per group:               
    Group 1                                          3
    Group 2                                          2

Model Test User Model:
                                              Standard      Scaled
  Test Statistic                                19.616      18.369
  Degrees of freedom                                26          26
  P-value (Chi-square)                           0.809       0.862
  Scaling correction factor                                  1.068
    Yuan-Bentler correction (Mplus variant)                       
  Test statistic for each group:
    Group 1                                      4.565       4.274
    Group 2                                     15.052      14.095

Model Test Baseline Model:

  Test statistic                              1755.372    1429.002
  Degrees of freedom                                30          30
  P-value                                        0.000       0.000
  Scaling correction factor                                  1.228

User Model versus Baseline Model:

  Comparative Fit Index (CFI)                    1.000       1.000
  Tucker-Lewis Index (TLI)                       1.004       1.006
                                                                  
  Robust Comparative Fit Index (CFI)                         1.000
  Robust Tucker-Lewis Index (TLI)                            1.005

Loglikelihood and Information Criteria:

  Loglikelihood user model (H0)              -1818.093   -1818.093
  Scaling correction factor                                  0.681
      for the MLR correction                                      
  Loglikelihood unrestricted model (H1)      -1808.285   -1808.285
  Scaling correction factor                                  1.069
      for the MLR correction                                      
                                                                  
  Akaike (AIC)                                3692.186    3692.186
  Bayesian (BIC)                              3800.527    3800.527
  Sample-size adjusted Bayesian (SABIC)       3711.699    3711.699

Root Mean Square Error of Approximation:

  RMSEA                                          0.000       0.000
  90 Percent confidence interval - lower         0.000       0.000
  90 Percent confidence interval - upper         0.038       0.031
  P-value H_0: RMSEA <= 0.050                    0.982       0.991
  P-value H_0: RMSEA >= 0.080                    0.000       0.000
                                                                  
  Robust RMSEA                                               0.000
  90 Percent confidence interval - lower                     0.000
  90 Percent confidence interval - upper                     0.039
  P-value H_0: Robust RMSEA <= 0.050                         0.979
  P-value H_0: Robust RMSEA >= 0.080                         0.000

Standardized Root Mean Square Residual:

  SRMR                                           0.031       0.031

Parameter Estimates:

  Standard errors                             Sandwich
  Information bread                           Observed
  Observed information based on                Hessian


Group 1 [Group 1]:

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  selfes_t1 =~                                                          
    slf_1_1           1.000                               0.772    0.900
    slf_2_1 (lmb2)    1.155    0.040   28.592    0.000    0.891    0.931
    slf_3_1 (lmb3)    1.083    0.050   21.584    0.000    0.836    0.805
  selfes_t2 =~                                                          
    slf_1_2           1.000                               0.760    0.897
    slf_2_2 (lmb2)    1.155    0.040   28.592    0.000    0.877    0.929
    slf_3_2 (lmb3)    1.083    0.050   21.584    0.000    0.823    0.801
  d_selfes_1 =~                                                         
    slfs_t2           1.000                               0.577    0.577

Regressions:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  selfes_t2 ~                                                           
    selfes_t1         1.000                               1.016    1.016

Covariances:
                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  selfes_t1 ~~                                                           
    d_slf_1 (fb_a)    -0.105    0.027   -3.846    0.000   -0.311   -0.311
 .selfes_par1_t1 ~~                                                      
   .slf_1_2 (cov1)     0.072    0.019    3.783    0.000    0.072    0.516
 .selfes_par2_t1 ~~                                                      
   .slf_2_2 (cov2)     0.067    0.025    2.627    0.009    0.067    0.546
 .selfes_par3_t1 ~~                                                      
   .slf_3_2 (cov3)     0.130    0.037    3.463    0.001    0.130    0.342

Intercepts:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .slfs_t2           0.000                               0.000    0.000
    d_slf_1 (d_n_)    0.108    0.039    2.765    0.006    0.247    0.247
    slfs_t1 (wb__)    3.674    0.064   57.373    0.000    4.759    4.759
   .slf_1_1           0.000                               0.000    0.000
   .slf_2_1  (m2a)   -0.838    0.164   -5.108    0.000   -0.838   -0.875
   .slf_3_1  (m3a)   -1.202    0.192   -6.253    0.000   -1.202   -1.158
   .slf_1_2           0.000                               0.000    0.000
   .slf_2_2  (m2a)   -0.838    0.164   -5.108    0.000   -0.838   -0.887
   .slf_3_2  (m3a)   -1.202    0.192   -6.253    0.000   -1.202   -1.169

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .slfs_t2           0.000                               0.000    0.000
    d_slf_1 (d_v_)    0.192    0.038    5.058    0.000    1.000    1.000
    slfs_t1 (wb__)    0.596    0.063    9.423    0.000    1.000    1.000
   .slf_1_1 (res1)    0.140    0.020    7.066    0.000    0.140    0.191
   .slf_2_1 (res2)    0.122    0.025    4.849    0.000    0.122    0.133
   .slf_3_1 (res3)    0.379    0.037   10.352    0.000    0.379    0.352
   .slf_1_2 (res1)    0.140    0.020    7.066    0.000    0.140    0.196
   .slf_2_2 (res2)    0.122    0.025    4.849    0.000    0.122    0.137
   .slf_3_2 (res3)    0.379    0.037   10.352    0.000    0.379    0.359


Group 2 [Group 2]:

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  selfes_t1 =~                                                          
    slf_1_1           1.000                               0.738    0.879
    slf_2_1 (lmb2)    1.155    0.040   28.592    0.000    0.852    0.913
    slf_3_1 (lmb3)    1.083    0.050   21.584    0.000    0.799    0.780
  selfes_t2 =~                                                          
    slf_1_2           1.000                               0.751    0.883
    slf_2_2 (lmb2)    1.155    0.040   28.592    0.000    0.867    0.916
    slf_3_2 (lmb3)    1.083    0.050   21.584    0.000    0.814    0.786
  d_selfes_1 =~                                                         
    slfs_t2           1.000                               0.555    0.555

Regressions:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  selfes_t2 ~                                                           
    selfes_t1         1.000                               0.982    0.982

Covariances:
                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  selfes_t1 ~~                                                           
    d_slf_1 (fb_b)    -0.077    0.034   -2.289    0.022   -0.251   -0.251
 .selfes_par1_t1 ~~                                                      
   .slf_1_2 (cv1b)     0.060    0.024    2.441    0.015    0.060    0.373
 .selfes_par2_t1 ~~                                                      
   .slf_2_2 (cv2b)     0.058    0.029    2.017    0.044    0.058    0.399
 .selfes_par3_t1 ~~                                                      
   .slf_3_2 (cv3b)     0.108    0.040    2.710    0.007    0.108    0.264

Intercepts:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .slfs_t2           0.000                               0.000    0.000
    d_slf_1 (d_n_)    0.166    0.040    4.158    0.000    0.398    0.398
    slfs_t1 (wb__)    3.420    0.062   55.352    0.000    4.635    4.635
   .slf_1_1           0.000                               0.000    0.000
   .slf_2_1  (m2b)   -0.772    0.151   -5.096    0.000   -0.772   -0.827
   .slf_3_1  (m3b)   -0.966    0.187   -5.155    0.000   -0.966   -0.943
   .slf_1_2           0.000                               0.000    0.000
   .slf_2_2  (m2b)   -0.772    0.151   -5.096    0.000   -0.772   -0.815
   .slf_3_2  (m3b)   -0.966    0.187   -5.155    0.000   -0.966   -0.933

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .slfs_t2           0.000                               0.000    0.000
    d_slf_1 (d_v_)    0.174    0.036    4.817    0.000    1.000    1.000
    slfs_t1 (wb__)    0.544    0.061    8.914    0.000    1.000    1.000
   .slf_1_1 (rs1b)    0.160    0.024    6.757    0.000    0.160    0.227
   .slf_2_1 (rs2b)    0.145    0.027    5.425    0.000    0.145    0.166
   .slf_3_1 (rs3b)    0.410    0.040   10.133    0.000    0.410    0.391
   .slf_1_2 (rs1b)    0.160    0.024    6.757    0.000    0.160    0.221
   .slf_2_2 (rs2b)    0.145    0.027    5.425    0.000    0.145    0.161
   .slf_3_2 (rs3b)    0.410    0.040   10.133    0.000    0.410    0.383
7.1.1.4 Self-concept clarity

Self-concept clarity: fitting multi-group models

Show the code
# adapt latent change score model from above and add grouping factor in estimation (also add vectorized equality constraints to the model)

# configural invariance
mi_lcs_concept_group_config <- '
concept_t1 =~ 1*concept_par1_t1 + c("lamb2a", "lamb2b")*concept_par2_t1 + c("lamb3a", "lamb3b")*concept_par3_t1 # This specifies the measurement model for concept_t1 
concept_t2 =~ 1*concept_par1_t2 + c("lamb2a", "lamb2b")*concept_par2_t2 + c("lamb3a", "lamb3b")*concept_par3_t2 # This specifies the measurement model for concept_t2 with the equality constrained factor loadings

concept_t2 ~ 1*concept_t1     # This parameter regresses concept_t2 perfectly on concept_t1
d_concept_1 =~ 1*concept_t2   # This defines the latent change score factor as measured perfectly by scores on concept_t2
concept_t2 ~ 0*1           # This line constrains the intercept of concept_t2 to 0
concept_t2 ~~ 0*concept_t2    # This fixes the variance of concept_t2 to 0

d_concept_1 ~ c("d_int_a", "d_int_b")*1           # This estimates the intercept of the change score 
concept_t1 ~ c("wb_int_a", "wb_int_b")*1            # This estimates the intercept of concept_t1 
d_concept_1 ~~ c("d_var_a", "d_var_b")*d_concept_1   # This estimates the variance of the change scores 
concept_t1 ~~ c("wb_var_a", "wb_var_b")*concept_t1     # This estimates the variance of the concept_t1 
d_concept_1 ~~ c("fb_a", "fb_b")*concept_t1     # This estimates the self-feedback parameter, as a covariance! -> therefore, the interpretation of the change score

concept_par1_t1 ~~ c("cov1a", "cov1b")*concept_par1_t2   # This allows residual covariance on indicator X1 across T1 and T2
concept_par2_t1 ~~ c("cov2a", "cov2b")*concept_par2_t2   # This allows residual covariance on indicator X2 across T1 and T2
concept_par3_t1 ~~ c("cov3a", "cov3b")*concept_par3_t2   # This allows residual covariance on indicator X3 across T1 and T2

concept_par1_t1 ~~ c("res1a", "res1b")*concept_par1_t1   # This allows residual variance on indicator X1 at T1 
concept_par2_t1 ~~ c("res2a", "res2b")*concept_par2_t1   # This allows residual variance on indicator X2 at T1
concept_par3_t1 ~~ c("res3a", "res3b")*concept_par3_t1   # This allows residual variance on indicator X3 at T1

concept_par1_t2 ~~ c("res1a", "res1b")*concept_par1_t2  # This allows residual variance on indicator X1 at T2 
concept_par2_t2 ~~ c("res2a", "res2b")*concept_par2_t2  # This allows residual variance on indicator X2 at T2 
concept_par3_t2 ~~ c("res3a", "res3b")*concept_par3_t2  # This allows residual variance on indicator X3 at T2

concept_par1_t1 ~ 0*1      # This constrains the intercept of X1 to 0 at T1
concept_par2_t1 ~ c("m2a", "m2b")*1     # This estimates the intercept of X2 at T1
concept_par3_t1 ~ c("m3a", "m3b")*1     # This estimates the intercept of X3 at T1

concept_par1_t2 ~ 0*1      # This constrains the intercept of X1 to 0 at T2
concept_par2_t2 ~ c("m2a", "m2b")*1     # This estimates the intercept of X2 at T2
concept_par3_t2 ~ c("m3a", "m3b")*1     # This estimates the intercept of X3 at T2
'
lcs_concept_group_12_config <- sem(mi_lcs_concept_group_config, data=df_sbsa3_wide_wb %>% filter(group!="Group 3"), estimator='mlr', 
                                fixed.x=FALSE, missing='fiml', group = "group")

# weak invariance
mi_lcs_concept_group_weak <- '
concept_t1 =~ 1*concept_par1_t1 + c("lamb2", "lamb2")*concept_par2_t1 + c("lamb3", "lamb3")*concept_par3_t1 # This specifies the measurement model for concept_t1 
concept_t2 =~ 1*concept_par1_t2 + c("lamb2", "lamb2")*concept_par2_t2 + c("lamb3", "lamb3")*concept_par3_t2 # This specifies the measurement model for concept_t2 with the equality constrained factor loadings

concept_t2 ~ 1*concept_t1     # This parameter regresses concept_t2 perfectly on concept_t1
d_concept_1 =~ 1*concept_t2   # This defines the latent change score factor as measured perfectly by scores on concept_t2
concept_t2 ~ 0*1           # This line constrains the intercept of concept_t2 to 0
concept_t2 ~~ 0*concept_t2    # This fixes the variance of concept_t2 to 0

d_concept_1 ~ c("d_int_a", "d_int_b")*1           # This estimates the intercept of the change score 
concept_t1 ~ c("wb_int_a", "wb_int_b")*1            # This estimates the intercept of concept_t1 
d_concept_1 ~~ c("d_var_a", "d_var_b")*d_concept_1   # This estimates the variance of the change scores 
concept_t1 ~~ c("wb_var_a", "wb_var_b")*concept_t1     # This estimates the variance of the concept_t1 
d_concept_1 ~~ c("fb_a", "fb_b")*concept_t1     # This estimates the self-feedback parameter, as a covariance! -> therefore, the interpretation of the change score

concept_par1_t1 ~~ c("cov1a", "cov1b")*concept_par1_t2   # This allows residual covariance on indicator X1 across T1 and T2
concept_par2_t1 ~~ c("cov2a", "cov2b")*concept_par2_t2   # This allows residual covariance on indicator X2 across T1 and T2
concept_par3_t1 ~~ c("cov3a", "cov3b")*concept_par3_t2   # This allows residual covariance on indicator X3 across T1 and T2

concept_par1_t1 ~~ c("res1a", "res1b")*concept_par1_t1   # This allows residual variance on indicator X1 at T1 
concept_par2_t1 ~~ c("res2a", "res2b")*concept_par2_t1   # This allows residual variance on indicator X2 at T1
concept_par3_t1 ~~ c("res3a", "res3b")*concept_par3_t1   # This allows residual variance on indicator X3 at T1

concept_par1_t2 ~~ c("res1a", "res1b")*concept_par1_t2  # This allows residual variance on indicator X1 at T2 
concept_par2_t2 ~~ c("res2a", "res2b")*concept_par2_t2  # This allows residual variance on indicator X2 at T2 
concept_par3_t2 ~~ c("res3a", "res3b")*concept_par3_t2  # This allows residual variance on indicator X3 at T2

concept_par1_t1 ~ 0*1      # This constrains the intercept of X1 to 0 at T1
concept_par2_t1 ~ c("m2a", "m2b")*1     # This estimates the intercept of X2 at T1
concept_par3_t1 ~ c("m3a", "m3b")*1     # This estimates the intercept of X3 at T1

concept_par1_t2 ~ 0*1      # This constrains the intercept of X1 to 0 at T2
concept_par2_t2 ~ c("m2a", "m2b")*1     # This estimates the intercept of X2 at T2
concept_par3_t2 ~ c("m3a", "m3b")*1     # This estimates the intercept of X3 at T2
'

lcs_concept_group_12_weak <- sem(mi_lcs_concept_group_weak, data=df_sbsa3_wide_wb %>% filter(group!="Group 3"), estimator='mlr', fixed.x=FALSE, missing='fiml', 
                           group = "group", group.equal = "loadings")

# strong invariance
mi_lcs_concept_group_strong <- '
concept_t1 =~ 1*concept_par1_t1 + c("lamb2", "lamb2")*concept_par2_t1 + c("lamb3", "lamb3")*concept_par3_t1 # This specifies the measurement model for concept_t1 
concept_t2 =~ 1*concept_par1_t2 + c("lamb2", "lamb2")*concept_par2_t2 + c("lamb3", "lamb3")*concept_par3_t2 # This specifies the measurement model for concept_t2 with the equality constrained factor loadings

concept_t2 ~ 1*concept_t1     # This parameter regresses concept_t2 perfectly on concept_t1
d_concept_1 =~ 1*concept_t2   # This defines the latent change score factor as measured perfectly by scores on concept_t2
concept_t2 ~ 0*1           # This line constrains the intercept of concept_t2 to 0
concept_t2 ~~ 0*concept_t2    # This fixes the variance of concept_t2 to 0

d_concept_1 ~ c("d_int", "d_int")*1           # This estimates the intercept of the change score 
concept_t1 ~ c("wb_int", "wb_int")*1            # This estimates the intercept of concept_t1 
d_concept_1 ~~ c("d_var_a", "d_var_b")*d_concept_1   # This estimates the variance of the change scores 
concept_t1 ~~ c("wb_var_a", "wb_var_b")*concept_t1     # This estimates the variance of the concept_t1 
d_concept_1 ~~ c("fb_a", "fb_b")*concept_t1     # This estimates the self-feedback parameter, as a covariance! -> therefore, the interpretation of the change score

concept_par1_t1 ~~ c("cov1a", "cov1b")*concept_par1_t2   # This allows residual covariance on indicator X1 across T1 and T2
concept_par2_t1 ~~ c("cov2a", "cov2b")*concept_par2_t2   # This allows residual covariance on indicator X2 across T1 and T2
concept_par3_t1 ~~ c("cov3a", "cov3b")*concept_par3_t2   # This allows residual covariance on indicator X3 across T1 and T2

concept_par1_t1 ~~ c("res1a", "res1b")*concept_par1_t1   # This allows residual variance on indicator X1 at T1 
concept_par2_t1 ~~ c("res2a", "res2b")*concept_par2_t1   # This allows residual variance on indicator X2 at T1
concept_par3_t1 ~~ c("res3a", "res3b")*concept_par3_t1   # This allows residual variance on indicator X3 at T1

concept_par1_t2 ~~ c("res1a", "res1b")*concept_par1_t2  # This allows residual variance on indicator X1 at T2 
concept_par2_t2 ~~ c("res2a", "res2b")*concept_par2_t2  # This allows residual variance on indicator X2 at T2 
concept_par3_t2 ~~ c("res3a", "res3b")*concept_par3_t2  # This allows residual variance on indicator X3 at T2

concept_par1_t1 ~ 0*1      # This constrains the intercept of X1 to 0 at T1
concept_par2_t1 ~ c("m2", "m2")*1     # This estimates the intercept of X2 at T1
concept_par3_t1 ~ c("m3", "m3")*1     # This estimates the intercept of X3 at T1

concept_par1_t2 ~ 0*1      # This constrains the intercept of X1 to 0 at T2
concept_par2_t2 ~ c("m2", "m2")*1     # This estimates the intercept of X2 at T2
concept_par3_t2 ~ c("m3", "m3")*1     # This estimates the intercept of X3 at T2
'

lcs_concept_group_12_strong <- sem(mi_lcs_concept_group_strong, data=df_sbsa3_wide_wb %>% filter(group!="Group 3"), estimator='mlr', fixed.x=FALSE, missing='fiml', 
                             group = "group", group.equal = c("intercepts", "loadings"))

Self-concept clarity: results

# model comparison tests for measurement invariance
lavTestLRT(lcs_concept_group_12_config, lcs_concept_group_12_weak, lcs_concept_group_12_strong)

Scaled Chi-Squared Difference Test (method = "satorra.bentler.2001")

lavaan NOTE:
    The "Chisq" column contains standard test statistics, not the
    robust test that should be reported per model. A robust difference
    test is a function of two standard (not robust) statistics.
 
                            Df    AIC    BIC  Chisq Chisq diff Df diff
lcs_concept_group_12_config 24 3904.5 4020.6 30.375                   
lcs_concept_group_12_weak   26 3908.1 4016.4 37.911     7.7286       2
lcs_concept_group_12_strong 30 3905.3 3998.2 43.199     5.2123       4
                            Pr(>Chisq)  
lcs_concept_group_12_config             
lcs_concept_group_12_weak      0.02098 *
lcs_concept_group_12_strong    0.26620  
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# show model with varying latent change parameters 
# -> key parameter is "d_concept_1 ~1"
# labelled parameter as "d_int_a" & "d_int_b" (a = Group 1, b = Group 2)
params_lcs_concept_group_12_weak <- broom::tidy(lcs_concept_group_12_weak, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_wb_st3 %>% filter(trait=="concept" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_concept_1 ~1 ")) %>% mutate(sig_diff = " ")
kable(params_lcs_concept_group_12_weak, digits = 3) 
term label estimate beta std.all statistic p.value sig_diff
d_concept_1 ~1 d_int_a 0.159 0.182 0.324 3.578 0.000
d_concept_1 ~1 d_int_b 0.080 0.091 0.181 1.903 0.057
# constrained to be equal in the strong measurement invariance model:
kable(broom::tidy(lcs_concept_group_12_strong, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_wb_st3 %>% filter(trait=="concept" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_concept_1 ~1 ")), digits = 3) 
term label estimate beta std.all statistic p.value
d_concept_1 ~1 d_int 0.117 0.134 0.237 3.826 0
d_concept_1 ~1 d_int 0.117 0.134 0.265 3.826 0

More positive change in self-concept clarity in the Group 1 compared to Group 2 but no substantial differences according to the LRTs. Slight caveat: weak measurement invariance could not be established as a first step at p < .05.

# whole model (weak invariance)
summary(lcs_concept_group_12_weak, fit.measures=TRUE, standardized=TRUE, rsquare=F)
lavaan 0.6.15 ended normally after 60 iterations

  Estimator                                         ML
  Optimization method                           NLMINB
  Number of model parameters                        44
  Number of equality constraints                    16

  Number of observations per group:                   
    Group 1                                        175
    Group 2                                        179
  Number of missing patterns per group:               
    Group 1                                          3
    Group 2                                          2

Model Test User Model:
                                              Standard      Scaled
  Test Statistic                                37.911      38.055
  Degrees of freedom                                26          26
  P-value (Chi-square)                           0.062       0.060
  Scaling correction factor                                  0.996
    Yuan-Bentler correction (Mplus variant)                       
  Test statistic for each group:
    Group 1                                     26.078      26.176
    Group 2                                     11.834      11.878

Model Test Baseline Model:

  Test statistic                              1625.355    1381.998
  Degrees of freedom                                30          30
  P-value                                        0.000       0.000
  Scaling correction factor                                  1.176

User Model versus Baseline Model:

  Comparative Fit Index (CFI)                    0.993       0.991
  Tucker-Lewis Index (TLI)                       0.991       0.990
                                                                  
  Robust Comparative Fit Index (CFI)                         0.992
  Robust Tucker-Lewis Index (TLI)                            0.991

Loglikelihood and Information Criteria:

  Loglikelihood user model (H0)              -1926.027   -1926.027
  Scaling correction factor                                  0.670
      for the MLR correction                                      
  Loglikelihood unrestricted model (H1)      -1907.072   -1907.072
  Scaling correction factor                                  1.025
      for the MLR correction                                      
                                                                  
  Akaike (AIC)                                3908.054    3908.054
  Bayesian (BIC)                              4016.395    4016.395
  Sample-size adjusted Bayesian (SABIC)       3927.567    3927.567

Root Mean Square Error of Approximation:

  RMSEA                                          0.051       0.051
  90 Percent confidence interval - lower         0.000       0.000
  90 Percent confidence interval - upper         0.084       0.084
  P-value H_0: RMSEA <= 0.050                    0.451       0.445
  P-value H_0: RMSEA >= 0.080                    0.077       0.080
                                                                  
  Robust RMSEA                                               0.054
  90 Percent confidence interval - lower                     0.000
  90 Percent confidence interval - upper                     0.089
  P-value H_0: Robust RMSEA <= 0.050                         0.400
  P-value H_0: Robust RMSEA >= 0.080                         0.118

Standardized Root Mean Square Residual:

  SRMR                                           0.040       0.040

Parameter Estimates:

  Standard errors                             Sandwich
  Information bread                           Observed
  Observed information based on                Hessian


Group 1 [Group 1]:

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  concept_t1 =~                                                         
    cnc_1_1           1.000                               0.742    0.824
    cnc_2_1 (lmb2)    1.215    0.049   24.807    0.000    0.902    0.880
    cnc_3_1 (lmb3)    1.217    0.055   22.284    0.000    0.903    0.910
  concept_t2 =~                                                         
    cnc_1_2           1.000                               0.764    0.832
    cnc_2_2 (lmb2)    1.215    0.049   24.807    0.000    0.928    0.886
    cnc_3_2 (lmb3)    1.217    0.055   22.284    0.000    0.930    0.914
  d_concept_1 =~                                                        
    cncpt_2           1.000                               0.642    0.642

Regressions:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  concept_t2 ~                                                          
    concept_t1        1.000                               0.971    0.971

Covariances:
                     Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  concept_t1 ~~                                                           
    d_cnc_1 (fb_a)     -0.104    0.035   -2.975    0.003   -0.285   -0.285
 .concept_par1_t1 ~~                                                      
   .cnc_1_2 (cov1)      0.082    0.032    2.541    0.011    0.082    0.313
 .concept_par2_t1 ~~                                                      
   .cnc_2_2 (cov2)      0.090    0.031    2.886    0.004    0.090    0.379
 .concept_par3_t1 ~~                                                      
   .cnc_3_2 (cov3)      0.079    0.028    2.831    0.005    0.079    0.464

Intercepts:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .cncpt_2           0.000                               0.000    0.000
    d_cnc_1 (d_n_)    0.159    0.044    3.578    0.000    0.324    0.324
    cncpt_1 (wb__)    3.043    0.067   45.621    0.000    4.100    4.100
   .cnc_1_1           0.000                               0.000    0.000
   .cnc_2_1  (m2a)   -0.590    0.164   -3.604    0.000   -0.590   -0.576
   .cnc_3_1  (m3a)   -0.528    0.181   -2.923    0.003   -0.528   -0.532
   .cnc_1_2           0.000                               0.000    0.000
   .cnc_2_2  (m2a)   -0.590    0.164   -3.604    0.000   -0.590   -0.563
   .cnc_3_2  (m3a)   -0.528    0.181   -2.923    0.003   -0.528   -0.519

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .cncpt_2           0.000                               0.000    0.000
    d_cnc_1 (d_v_)    0.240    0.056    4.310    0.000    1.000    1.000
    cncpt_1 (wb__)    0.551    0.062    8.909    0.000    1.000    1.000
   .cnc_1_1 (res1)    0.261    0.034    7.716    0.000    0.261    0.321
   .cnc_2_1 (res2)    0.237    0.032    7.454    0.000    0.237    0.226
   .cnc_3_1 (res3)    0.170    0.031    5.545    0.000    0.170    0.173
   .cnc_1_2 (res1)    0.261    0.034    7.716    0.000    0.261    0.309
   .cnc_2_2 (res2)    0.237    0.032    7.454    0.000    0.237    0.216
   .cnc_3_2 (res3)    0.170    0.031    5.545    0.000    0.170    0.165


Group 2 [Group 2]:

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  concept_t1 =~                                                         
    cnc_1_1           1.000                               0.692    0.807
    cnc_2_1 (lmb2)    1.215    0.049   24.807    0.000    0.840    0.854
    cnc_3_1 (lmb3)    1.217    0.055   22.284    0.000    0.842    0.903
  concept_t2 =~                                                         
    cnc_1_2           1.000                               0.703    0.812
    cnc_2_2 (lmb2)    1.215    0.049   24.807    0.000    0.854    0.857
    cnc_3_2 (lmb3)    1.217    0.055   22.284    0.000    0.856    0.906
  d_concept_1 =~                                                        
    cncpt_2           1.000                               0.624    0.624

Regressions:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  concept_t2 ~                                                          
    concept_t1        1.000                               0.984    0.984

Covariances:
                     Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  concept_t1 ~~                                                           
    d_cnc_1 (fb_b)     -0.088    0.029   -3.026    0.002   -0.291   -0.291
 .concept_par1_t1 ~~                                                      
   .cnc_1_2 (cv1b)      0.075    0.027    2.760    0.006    0.075    0.293
 .concept_par2_t1 ~~                                                      
   .cnc_2_2 (cv2b)      0.080    0.031    2.618    0.009    0.080    0.304
 .concept_par3_t1 ~~                                                      
   .cnc_3_2 (cv3b)      0.057    0.030    1.905    0.057    0.057    0.352

Intercepts:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .cncpt_2           0.000                               0.000    0.000
    d_cnc_1 (d_n_)    0.080    0.042    1.903    0.057    0.181    0.181
    cncpt_1 (wb__)    2.921    0.059   49.287    0.000    4.222    4.222
   .cnc_1_1           0.000                               0.000    0.000
   .cnc_2_1  (m2b)   -0.534    0.151   -3.527    0.000   -0.534   -0.543
   .cnc_3_1  (m3b)   -0.444    0.168   -2.642    0.008   -0.444   -0.476
   .cnc_1_2           0.000                               0.000    0.000
   .cnc_2_2  (m2b)   -0.534    0.151   -3.527    0.000   -0.534   -0.536
   .cnc_3_2  (m3b)   -0.444    0.168   -2.642    0.008   -0.444   -0.470

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .cncpt_2           0.000                               0.000    0.000
    d_cnc_1 (d_v_)    0.193    0.034    5.748    0.000    1.000    1.000
    cncpt_1 (wb__)    0.478    0.056    8.534    0.000    1.000    1.000
   .cnc_1_1 (rs1b)    0.256    0.025   10.385    0.000    0.256    0.349
   .cnc_2_1 (rs2b)    0.263    0.035    7.426    0.000    0.263    0.271
   .cnc_3_1 (rs3b)    0.161    0.031    5.257    0.000    0.161    0.185
   .cnc_1_2 (rs1b)    0.256    0.025   10.385    0.000    0.256    0.341
   .cnc_2_2 (rs2b)    0.263    0.035    7.426    0.000    0.263    0.265
   .cnc_3_2 (rs3b)    0.161    0.031    5.257    0.000    0.161    0.180

7.1.2 Latent change: differences in personality change

7.1.2.1 Run models

Adapt latent change score model from above (but without any moderation) and add grouping factor in estimation (adding vectorized equality constraints to the model step by step):

Show the code
# create templates:

# configural invariance
trait_template_main_config <- '
trait_t1 =~ 1*ind01_t1 +  c("lamb2a", "lamb2b")*ind02_t1 + c("lamb3a", "lamb3b")*ind03_t1 # This specifies the measurement model for trait_t1 
trait_t2 =~ 1*ind01_t2 +  c("lamb2a", "lamb2b")*ind02_t2 + c("lamb3a", "lamb3b")*ind03_t2 # This specifies the measurement model for trait_t2 with the equality constrained factor loadings

trait_t2 ~ 1*trait_t1     # This parameter regresses trait_t2 perfectly on trait_t1
d_trait_1 =~ 1*trait_t2   # This defines the latent change score factor as measured perfectly by scores on trait_t2
trait_t2 ~ 0*1            # This line constrains the intercept of trait_t2 to 0
trait_t2 ~~ 0*trait_t2    # This fixes the variance of trait_t2 to 0

d_trait_1 ~ c("d_int_a", "d_int_b")*1              # This estimates the intercept of the change score 
trait_t1 ~ c("b5_int_a", "b5_int_b")*1               # This estimates the intercept of trait_t1 
d_trait_1 ~~ c("d_var_a", "d_var_b")*d_trait_1     # This estimates the variance of the change scores 
trait_t1 ~~ c("b5_var_a", "b5_var_b")*trait_t1         # This estimates the variance of trait_t1 
d_trait_1 ~~ c("fb_a", "fb_b")* trait_t1   # This estimates the self-feedback parameter, as a covariance! -> therefore, the interpretation of the change score

ind01_t1 ~~ c("cov1a", "cov1b")*ind01_t2   # This allows residual covariance on indicator X1 across T1 and T2
ind02_t1 ~~ c("cov2a", "cov2b")*ind02_t2   # This allows residual covariance on indicator X2 across T1 and T2
ind03_t1 ~~ c("cov3a", "cov3b")*ind03_t2   # This allows residual covariance on indicator X3 across T1 and T2

ind01_t1 ~~ c("res1a", "res1b")*ind01_t1   # This allows residual variance on indicator X1 at T1 
ind02_t1 ~~ c("res2a", "res2b")*ind02_t1   # This allows residual variance on indicator X2 at T1
ind03_t1 ~~ c("res3a", "res3b")*ind03_t1   # This allows residual variance on indicator X3 at T1

ind01_t2 ~~ c("res1a", "res1b")*ind01_t2  # This allows residual variance on indicator X1 at T2 
ind02_t2 ~~ c("res2a", "res2b")*ind02_t2  # This allows residual variance on indicator X2 at T2 
ind03_t2 ~~ c("res3a", "res3b")*ind03_t2  # This allows residual variance on indicator X3 at T2

ind01_t1 ~ 0*1      # This constrains the intercept of X1 to 0 at T1
ind02_t1 ~ c("m2a", "m2b")*1     # This estimates the intercept of X2 at T1
ind03_t1 ~ c("m3a", "m3b")*1     # This estimates the intercept of X3 at T1

ind01_t2 ~ 0*1      # This constrains the intercept of X1 to 0 at T2
ind02_t2 ~ c("m2a", "m2b")*1     # This estimates the intercept of X2 at T2
ind03_t2 ~ c("m3a", "m3b")*1     # This estimates the intercept of X3 at T2
'

# weak invariance
trait_template_main_weak <- '
trait_t1 =~ 1*ind01_t1 +  c("lamb2", "lamb2")*ind02_t1 + c("lamb3", "lamb3")*ind03_t1 # This specifies the measurement model for trait_t1 
trait_t2 =~ 1*ind01_t2 +  c("lamb2", "lamb2")*ind02_t2 + c("lamb3", "lamb3")*ind03_t2 # This specifies the measurement model for trait_t2 with the equality constrained factor loadings

trait_t2 ~ 1*trait_t1     # This parameter regresses trait_t2 perfectly on trait_t1
d_trait_1 =~ 1*trait_t2   # This defines the latent change score factor as measured perfectly by scores on trait_t2
trait_t2 ~ 0*1            # This line constrains the intercept of trait_t2 to 0
trait_t2 ~~ 0*trait_t2    # This fixes the variance of trait_t2 to 0

d_trait_1 ~ c("d_int_a", "d_int_b")*1              # This estimates the intercept of the change score 
trait_t1 ~ c("b5_int_a", "b5_int_b")*1               # This estimates the intercept of trait_t1 
d_trait_1 ~~ c("d_var_a", "d_var_b")*d_trait_1     # This estimates the variance of the change scores 
trait_t1 ~~ c("b5_var_a", "b5_var_b")*trait_t1         # This estimates the variance of trait_t1 
d_trait_1 ~~ c("fb_a", "fb_b")* trait_t1   # This estimates the self-feedback parameter, as a covariance! -> therefore, the interpretation of the change score

ind01_t1 ~~ c("cov1a", "cov1b")*ind01_t2   # This allows residual covariance on indicator X1 across T1 and T2
ind02_t1 ~~ c("cov2a", "cov2b")*ind02_t2   # This allows residual covariance on indicator X2 across T1 and T2
ind03_t1 ~~ c("cov3a", "cov3b")*ind03_t2   # This allows residual covariance on indicator X3 across T1 and T2

ind01_t1 ~~ c("res1a", "res1b")*ind01_t1   # This allows residual variance on indicator X1 at T1 
ind02_t1 ~~ c("res2a", "res2b")*ind02_t1   # This allows residual variance on indicator X2 at T1
ind03_t1 ~~ c("res3a", "res3b")*ind03_t1   # This allows residual variance on indicator X3 at T1

ind01_t2 ~~ c("res1a", "res1b")*ind01_t2  # This allows residual variance on indicator X1 at T2 
ind02_t2 ~~ c("res2a", "res2b")*ind02_t2  # This allows residual variance on indicator X2 at T2 
ind03_t2 ~~ c("res3a", "res3b")*ind03_t2  # This allows residual variance on indicator X3 at T2

ind01_t1 ~ 0*1      # This constrains the intercept of X1 to 0 at T1
ind02_t1 ~ c("m2a", "m2b")*1     # This estimates the intercept of X2 at T1
ind03_t1 ~ c("m3a", "m3b")*1     # This estimates the intercept of X3 at T1

ind01_t2 ~ 0*1      # This constrains the intercept of X1 to 0 at T2
ind02_t2 ~ c("m2a", "m2b")*1     # This estimates the intercept of X2 at T2
ind03_t2 ~ c("m3a", "m3b")*1     # This estimates the intercept of X3 at T2
'

# strong invariance
trait_template_main_strong <- '
trait_t1 =~ 1*ind01_t1 +  c("lamb2", "lamb2")*ind02_t1 + c("lamb3", "lamb3")*ind03_t1 # This specifies the measurement model for trait_t1 
trait_t2 =~ 1*ind01_t2 +  c("lamb2", "lamb2")*ind02_t2 + c("lamb3", "lamb3")*ind03_t2 # This specifies the measurement model for trait_t2 with the equality constrained factor loadings

trait_t2 ~ 1*trait_t1     # This parameter regresses trait_t2 perfectly on trait_t1
d_trait_1 =~ 1*trait_t2   # This defines the latent change score factor as measured perfectly by scores on trait_t2
trait_t2 ~ 0*1            # This line constrains the intercept of trait_t2 to 0
trait_t2 ~~ 0*trait_t2    # This fixes the variance of trait_t2 to 0

d_trait_1 ~ c("d_int", "d_int")*1              # This estimates the intercept of the change score 
trait_t1 ~ c("b5_int", "b5_int")*1               # This estimates the intercept of trait_t1 
d_trait_1 ~~ c("d_var_a", "d_var_b")*d_trait_1     # This estimates the variance of the change scores 
trait_t1 ~~ c("b5_var_a", "b5_var_b")*trait_t1         # This estimates the variance of trait_t1 
d_trait_1 ~~ c("fb_a", "fb_b")* trait_t1   # This estimates the self-feedback parameter, as a covariance! -> therefore, the interpretation of the change score

ind01_t1 ~~ c("cov1a", "cov1b")*ind01_t2   # This allows residual covariance on indicator X1 across T1 and T2
ind02_t1 ~~ c("cov2a", "cov2b")*ind02_t2   # This allows residual covariance on indicator X2 across T1 and T2
ind03_t1 ~~ c("cov3a", "cov3b")*ind03_t2   # This allows residual covariance on indicator X3 across T1 and T2

ind01_t1 ~~ c("res1a", "res1b")*ind01_t1   # This allows residual variance on indicator X1 at T1 
ind02_t1 ~~ c("res2a", "res2b")*ind02_t1   # This allows residual variance on indicator X2 at T1
ind03_t1 ~~ c("res3a", "res3b")*ind03_t1   # This allows residual variance on indicator X3 at T1

ind01_t2 ~~ c("res1a", "res1b")*ind01_t2  # This allows residual variance on indicator X1 at T2 
ind02_t2 ~~ c("res2a", "res2b")*ind02_t2  # This allows residual variance on indicator X2 at T2 
ind03_t2 ~~ c("res3a", "res3b")*ind03_t2  # This allows residual variance on indicator X3 at T2

ind01_t1 ~ 0*1      # This constrains the intercept of X1 to 0 at T1
ind02_t1 ~ c("m2", "m2")*1     # This estimates the intercept of X2 at T1
ind03_t1 ~ c("m3", "m3")*1     # This estimates the intercept of X3 at T1

ind01_t2 ~ 0*1      # This constrains the intercept of X1 to 0 at T2
ind02_t2 ~ c("m2", "m2")*1     # This estimates the intercept of X2 at T2
ind03_t2 ~ c("m3", "m3")*1     # This estimates the intercept of X3 at T2
'

# strict invariance -> not really needed for this analysis

# loop across 5 traits
for (i in 1:5) {
  item_nrs = b5_vars[[i]][[1]]
  short_name = str_trunc(names(b5_vars)[i], 5, ellipsis = "")
  # configural - current
  template_filled_config_current <- str_replace_all(trait_template_main_config, 
                                       c("trait" = short_name,
                                         "ind01" = paste0(short_name, "_curr_par1"), 
                                         "ind02" = paste0(short_name, "_curr_par2"), 
                                         "ind03" = paste0(short_name, "_curr_par3")))
  trait_main_fit_config_current <- lavaan(template_filled_config_current, 
                                          data = df_sbsa3_wide_pers %>% filter(group!="Group 3"), 
                                          estimator='mlr', fixed.x=FALSE, missing='fiml', group = "group")
  eval(call("<-", as.name(paste0("lcs_", short_name, "_main_config_group_12")), template_filled_config_current))
  eval(call("<-", as.name(paste0("fit_lcs_", short_name, "_main_config_group_12")), trait_main_fit_config_current))
  # weak - current
  template_filled_weak_current <- str_replace_all(trait_template_main_weak, 
                                       c("trait" = short_name,
                                         "ind01" = paste0(short_name, "_curr_par1"), 
                                         "ind02" = paste0(short_name, "_curr_par2"), 
                                         "ind03" = paste0(short_name, "_curr_par3")))
  trait_main_fit_weak_current <- lavaan(template_filled_weak_current, 
                                          data = df_sbsa3_wide_pers %>% filter(group!="Group 3"), 
                                          estimator='mlr', fixed.x=FALSE, missing='fiml', group = "group", group.equal = "loadings")
  eval(call("<-", as.name(paste0("lcs_", short_name, "_main_weak_group_12")), template_filled_weak_current))
  eval(call("<-", as.name(paste0("fit_lcs_", short_name, "_main_weak_group_12")), trait_main_fit_weak_current))
  # strong - current
  template_filled_strong_current <- str_replace_all(trait_template_main_strong, 
                                       c("trait" = short_name,
                                         "ind01" = paste0(short_name, "_curr_par1"), 
                                         "ind02" = paste0(short_name, "_curr_par2"), 
                                         "ind03" = paste0(short_name, "_curr_par3")))
  trait_main_fit_strong_current <- lavaan(template_filled_strong_current, 
                                          data = df_sbsa3_wide_pers %>% filter(group!="Group 3"), 
                                          estimator='mlr', fixed.x=FALSE, missing='fiml', group = "group", group.equal = c("intercepts", "loadings"))
  eval(call("<-", as.name(paste0("lcs_", short_name, "_main_strong_group_12")), template_filled_strong_current))
  eval(call("<-", as.name(paste0("fit_lcs_", short_name, "_main_strong_group_12")), trait_main_fit_strong_current))
}
7.1.2.2 Current personality personality traits

Extraversion (current personality): results

# model comparison tests for measurement invariance
lavTestLRT(fit_lcs_extra_main_config_group_12, fit_lcs_extra_main_weak_group_12, fit_lcs_extra_main_strong_group_12)

Scaled Chi-Squared Difference Test (method = "satorra.bentler.2001")

lavaan NOTE:
    The "Chisq" column contains standard test statistics, not the
    robust test that should be reported per model. A robust difference
    test is a function of two standard (not robust) statistics.
 
                                   Df    AIC    BIC  Chisq Chisq diff Df diff
fit_lcs_extra_main_config_group_12 24 3307.9 3423.9 24.541                   
fit_lcs_extra_main_weak_group_12   26 3307.4 3415.7 28.071     3.7366       2
fit_lcs_extra_main_strong_group_12 30 3302.9 3395.7 31.554     3.4568       4
                                   Pr(>Chisq)
fit_lcs_extra_main_config_group_12           
fit_lcs_extra_main_weak_group_12       0.1544
fit_lcs_extra_main_strong_group_12     0.4845
# show model with varying latent change parameters 
# -> key parameter is "d_extra_1 ~1"
# labelled parameter as "d_int_a" & "d_int_b" (a = Group 1, b = Group 2)
params_main_lcs_extra_group_12 <- broom::tidy(fit_lcs_extra_main_weak_group_12, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_pers_st3 %>% filter(trait=="extra" & reference=="current" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_extra_1 ~1 ")) %>% mutate(sig_diff = " ")
kable(params_main_lcs_extra_group_12, digits = 3) 
term label estimate beta std.all statistic p.value sig_diff
d_extra_1 ~1 d_int_a 0.074 0.098 0.37 2.775 0.006
d_extra_1 ~1 d_int_b 0.088 0.116 0.29 2.739 0.006
# constrained to be equal in the strong measurement invariance model:
kable(broom::tidy(fit_lcs_extra_main_strong_group_12, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_pers_st3 %>% filter(trait=="extra" & reference=="current" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_extra_1 ~1 ")), digits = 3) 
term label estimate beta std.all statistic p.value
d_extra_1 ~1 d_int 0.08 0.105 0.398 3.831 0
d_extra_1 ~1 d_int 0.08 0.105 0.262 3.831 0
# model fit
kable(broom::glance(fit_lcs_extra_main_weak_group_12) %>% 
        select(nobs, npar, AIC, BIC, cfi, rmsea, srmr), digits = 3)
nobs npar AIC BIC cfi rmsea srmr
354 28 3307.394 3415.734 0.999 0.021 0.047

Very similar change in extraversion in both groups. No significant differences according to the LRTs.

Agreeableness (current personality): results

# model comparison tests for measurement invariance
lavTestLRT(fit_lcs_agree_main_config_group_12, fit_lcs_agree_main_weak_group_12, fit_lcs_agree_main_strong_group_12)

Scaled Chi-Squared Difference Test (method = "satorra.bentler.2001")

lavaan NOTE:
    The "Chisq" column contains standard test statistics, not the
    robust test that should be reported per model. A robust difference
    test is a function of two standard (not robust) statistics.
 
                                   Df    AIC    BIC  Chisq Chisq diff Df diff
fit_lcs_agree_main_config_group_12 24 3161.8 3277.9 39.952                   
fit_lcs_agree_main_weak_group_12   26 3158.5 3266.8 40.567     0.5738       2
fit_lcs_agree_main_strong_group_12 30 3156.4 3249.2 46.473     6.0557       4
                                   Pr(>Chisq)
fit_lcs_agree_main_config_group_12           
fit_lcs_agree_main_weak_group_12       0.7506
fit_lcs_agree_main_strong_group_12     0.1950
# show model with varying latent change parameters 
# -> key parameter is "d_agree_1 ~1"
# labelled parameter as "d_int_a" & "d_int_b" (a = Group 1, b = Group 2)
params_main_lcs_agree_group_12 <- broom::tidy(fit_lcs_agree_main_weak_group_12, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_pers_st3 %>% filter(trait=="agree" & reference=="current" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_agree_1 ~1 ")) %>% mutate(sig_diff = " ")
kable(params_main_lcs_agree_group_12, digits = 3) 
term label estimate beta std.all statistic p.value sig_diff
d_agree_1 ~1 d_int_a 0.051 0.087 0.176 1.727 0.084
d_agree_1 ~1 d_int_b 0.047 0.079 0.168 1.623 0.105
# constrained to be equal in the strong measurement invariance model:
kable(broom::tidy(fit_lcs_agree_main_strong_group_12, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_pers_st3 %>% filter(trait=="agree" & reference=="current" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_agree_1 ~1 ")), digits = 3) 
term label estimate beta std.all statistic p.value
d_agree_1 ~1 d_int 0.051 0.086 0.174 2.427 0.015
d_agree_1 ~1 d_int 0.051 0.086 0.181 2.427 0.015
# model fit
kable(broom::glance(fit_lcs_agree_main_weak_group_12) %>% 
        select(nobs, npar, AIC, BIC, cfi, rmsea, srmr), digits = 3)
nobs npar AIC BIC cfi rmsea srmr
354 28 3158.464 3266.804 0.986 0.056 0.052

Similar change in agreeableness in both groups. No significant differences according to the LRTs.

Conscientiousness (current personality): results

# model comparison tests for measurement invariance
lavTestLRT(fit_lcs_consc_main_config_group_12, fit_lcs_consc_main_weak_group_12, fit_lcs_consc_main_strong_group_12)

Scaled Chi-Squared Difference Test (method = "satorra.bentler.2001")

lavaan NOTE:
    The "Chisq" column contains standard test statistics, not the
    robust test that should be reported per model. A robust difference
    test is a function of two standard (not robust) statistics.
 
                                   Df    AIC    BIC  Chisq Chisq diff Df diff
fit_lcs_consc_main_config_group_12 24 3310.7 3426.8 27.774                   
fit_lcs_consc_main_weak_group_12   26 3307.5 3415.8 28.559     0.9105       2
fit_lcs_consc_main_strong_group_12 30 3302.7 3395.6 31.826     3.2967       4
                                   Pr(>Chisq)
fit_lcs_consc_main_config_group_12           
fit_lcs_consc_main_weak_group_12       0.6343
fit_lcs_consc_main_strong_group_12     0.5094
# show model with varying latent change parameters 
# -> key parameter is "d_consc_1 ~1"
# labelled parameter as "d_int_a" & "d_int_b" (a = Group 1, b = Group 2)
params_main_lcs_consc_group_12 <- broom::tidy(fit_lcs_consc_main_weak_group_12, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_pers_st3 %>% filter(trait=="consc" & reference=="current" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_consc_1 ~1 ")) %>% mutate(sig_diff = " ")
kable(params_main_lcs_consc_group_12, digits = 3) 
term label estimate beta std.all statistic p.value sig_diff
d_consc_1 ~1 d_int_a 0.099 0.131 0.326 2.925 0.003
d_consc_1 ~1 d_int_b 0.068 0.090 0.276 2.365 0.018
# constrained to be equal in the strong measurement invariance model:
kable(broom::tidy(fit_lcs_consc_main_strong_group_12, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_pers_st3 %>% filter(trait=="consc" & reference=="current" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_consc_1 ~1 ")), digits = 3) 
term label estimate beta std.all statistic p.value
d_consc_1 ~1 d_int 0.082 0.108 0.269 3.719 0
d_consc_1 ~1 d_int 0.082 0.108 0.329 3.719 0
# model fit
kable(broom::glance(fit_lcs_consc_main_weak_group_12) %>% 
        select(nobs, npar, AIC, BIC, cfi, rmsea, srmr), digits = 3)
nobs npar AIC BIC cfi rmsea srmr
354 28 3307.458 3415.798 0.998 0.024 0.03

Similar change in conscientiousness in both groups (increase is slightly larger in Group 1 compared to Group 2). No significant differences according to the LRTs.

Neuroticism (current personality): results

# model comparison tests for measurement invariance
lavTestLRT(fit_lcs_neuro_main_config_group_12, fit_lcs_neuro_main_weak_group_12, fit_lcs_neuro_main_strong_group_12)

Scaled Chi-Squared Difference Test (method = "satorra.bentler.2001")

lavaan NOTE:
    The "Chisq" column contains standard test statistics, not the
    robust test that should be reported per model. A robust difference
    test is a function of two standard (not robust) statistics.
 
                                   Df    AIC    BIC  Chisq Chisq diff Df diff
fit_lcs_neuro_main_config_group_12 24 3307.8 3423.9 33.029                   
fit_lcs_neuro_main_weak_group_12   26 3303.9 3412.2 33.086     0.0598       2
fit_lcs_neuro_main_strong_group_12 30 3299.2 3392.1 36.416     3.3577       4
                                   Pr(>Chisq)
fit_lcs_neuro_main_config_group_12           
fit_lcs_neuro_main_weak_group_12       0.9705
fit_lcs_neuro_main_strong_group_12     0.4998
# show model with varying latent change parameters 
# -> key parameter is "d_neuro_1 ~1"
# labelled parameter as "d_int_a" & "d_int_b" (a = Group 1, b = Group 2)
params_main_lcs_neuro_group_12 <- broom::tidy(fit_lcs_neuro_main_weak_group_12, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_pers_st3 %>% filter(trait=="neuro" & reference=="current" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_neuro_1 ~1 ")) %>% mutate(sig_diff = " ")
kable(params_main_lcs_neuro_group_12, digits = 3) 
term label estimate beta std.all statistic p.value sig_diff
d_neuro_1 ~1 d_int_a -0.052 -0.059 -0.145 -1.436 0.151
d_neuro_1 ~1 d_int_b -0.128 -0.145 -0.462 -4.271 0.000
# constrained to be equal in the strong measurement invariance model:
kable(broom::tidy(fit_lcs_neuro_main_strong_group_12, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_pers_st3 %>% filter(trait=="neuro" & reference=="current" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_neuro_1 ~1 ")), digits = 3) 
term label estimate beta std.all statistic p.value
d_neuro_1 ~1 d_int -0.098 -0.11 -0.270 -4.066 0
d_neuro_1 ~1 d_int -0.098 -0.11 -0.349 -4.066 0
# model fit
kable(broom::glance(fit_lcs_neuro_main_weak_group_12) %>% 
        select(nobs, npar, AIC, BIC, cfi, rmsea, srmr), digits = 3)
nobs npar AIC BIC cfi rmsea srmr
354 28 3303.86 3412.2 0.996 0.039 0.023

Small decreases in neuroticism in both groups. Decrease is larger in Group 2 compared to Group 1. No significant differences according to the LRTs.

Openness (current personality): results

# model comparison tests for measurement invariance
lavTestLRT(fit_lcs_openn_main_config_group_12, fit_lcs_openn_main_weak_group_12, fit_lcs_openn_main_strong_group_12)

Scaled Chi-Squared Difference Test (method = "satorra.bentler.2001")

lavaan NOTE:
    The "Chisq" column contains standard test statistics, not the
    robust test that should be reported per model. A robust difference
    test is a function of two standard (not robust) statistics.
 
                                   Df    AIC    BIC  Chisq Chisq diff Df diff
fit_lcs_openn_main_config_group_12 24 3369.4 3485.5 30.630                   
fit_lcs_openn_main_weak_group_12   26 3365.5 3473.9 30.784    0.14398       2
fit_lcs_openn_main_strong_group_12 30 3359.3 3452.1 32.526    1.71588       4
                                   Pr(>Chisq)
fit_lcs_openn_main_config_group_12           
fit_lcs_openn_main_weak_group_12       0.9305
fit_lcs_openn_main_strong_group_12     0.7878
# show model with varying latent change parameters 
# -> key parameter is "d_openn_1 ~1"
# labelled parameter as "d_int_a" & "d_int_b" (a = Group 1, b = Group 2)
params_main_lcs_openn_group_12 <- broom::tidy(fit_lcs_openn_main_weak_group_12, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_pers_st3 %>% filter(trait=="openn" & reference=="current" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_openn_1 ~1 ")) %>% mutate(sig_diff = " ")
kable(params_main_lcs_openn_group_12, digits = 3) 
term label estimate beta std.all statistic p.value sig_diff
d_openn_1 ~1 d_int_a 0.022 0.035 0.110 0.821 0.412
d_openn_1 ~1 d_int_b 0.044 0.068 0.221 1.798 0.072
# constrained to be equal in the strong measurement invariance model:
kable(broom::tidy(fit_lcs_openn_main_strong_group_12, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_pers_st3 %>% filter(trait=="openn" & reference=="current" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_openn_1 ~1 ")), digits = 3) 
term label estimate beta std.all statistic p.value
d_openn_1 ~1 d_int 0.034 0.052 0.167 1.797 0.072
d_openn_1 ~1 d_int 0.034 0.052 0.171 1.797 0.072
# model fit
kable(broom::glance(fit_lcs_openn_main_weak_group_12) %>% 
        select(nobs, npar, AIC, BIC, cfi, rmsea, srmr), digits = 3)
nobs npar AIC BIC cfi rmsea srmr
354 28 3365.527 3473.867 0.996 0.032 0.036

Almost no change in current personality openness in both groups. No significant differences according to the LRTs.

7.1.2.3 Ideal personality traits

In Study 3, we can only examine this in Group 1, so no multi-group comparison of ideal personality possible.

7.2 Group 2 vs. Group 3: Effect of demand characteristics

7.2.1 Well-being change: differences across groups

7.2.1.1 Life satisfaction

Life satisfaction: fitting multi-group models

Show the code
# adapt latent change score model from above and add grouping factor in estimation (also add vectorized equality constraints to the model)

# configural invariance -> model template created above
lcs_swls_group_23_config <- sem(mi_lcs_swls_group_config, data=df_sbsa3_wide_wb %>% filter(group!="Group 1"), estimator='mlr', 
                             fixed.x=FALSE, missing='fiml', group = "group")

# weak invariance -> model template created above
lcs_swls_group_23_weak <- sem(mi_lcs_swls_group_weak, data=df_sbsa3_wide_wb %>% 
                                filter(group!="Group 1"), estimator='mlr', fixed.x=FALSE, missing='fiml', 
                              group = "group", group.equal = "loadings")

# strong invariance -> model template created above
lcs_swls_group_23_strong <- sem(mi_lcs_swls_group_strong, data=df_sbsa3_wide_wb %>% 
                                  filter(group!="Group 1"), estimator='mlr', fixed.x=FALSE, missing='fiml', 
                                group = "group", group.equal = c("intercepts", "loadings"))

Life satisfaction: results

# model comparison tests for measurement invariance
lavTestLRT(lcs_swls_group_23_config, lcs_swls_group_23_weak, lcs_swls_group_23_strong)

Scaled Chi-Squared Difference Test (method = "satorra.bentler.2001")

lavaan NOTE:
    The "Chisq" column contains standard test statistics, not the
    robust test that should be reported per model. A robust difference
    test is a function of two standard (not robust) statistics.
 
                         Df    AIC    BIC   Chisq Chisq diff Df diff Pr(>Chisq)
lcs_swls_group_23_config 50 6785.3 6932.4  85.341                              
lcs_swls_group_23_weak   53 6779.9 6915.4  85.974     0.6112       3    0.89386
lcs_swls_group_23_strong 58 6785.8 6902.0 101.870    15.7264       5    0.00767
                           
lcs_swls_group_23_config   
lcs_swls_group_23_weak     
lcs_swls_group_23_strong **
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# show model with varying latent change parameters 
# -> key parameter is "d_swls_1 ~1"
# labelled parameter as "d_int_a" & "d_int_b" (a = Group 2, b = Group 3)
params_lcs_swls_group_23_weak <- broom::tidy(lcs_swls_group_23_weak, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_wb_st3 %>% filter(trait=="swls" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_swls_1 ~1 ")) %>% mutate(sig_diff = "**")
kable(params_lcs_swls_group_23_weak, digits = 3) 
term label estimate beta std.all statistic p.value sig_diff
d_swls_1 ~1 d_int_a 0.255 0.253 0.511 4.850 0.00 **
d_swls_1 ~1 d_int_b 0.019 0.019 0.034 0.358 0.72 **
# constrained to be equal in the strong measurement invariance model:
kable(broom::tidy(lcs_swls_group_23_strong, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_wb_st3 %>% filter(trait=="swls" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_swls_1 ~1 ")), digits = 3) 
term label estimate beta std.all statistic p.value
d_swls_1 ~1 d_int 0.14 0.14 0.275 3.377 0.001
d_swls_1 ~1 d_int 0.14 0.14 0.241 3.377 0.001

Significantly more positive change in life satisfaction in the Group 2 compared to Group 3 (difference significant according to the LRT). Change almost zero in Group 3.

# whole model (weak invariance)
summary(lcs_swls_group_23_weak, fit.measures=TRUE, standardized=TRUE, rsquare=F)
lavaan 0.6.15 ended normally after 53 iterations

  Estimator                                         ML
  Optimization method                           NLMINB
  Number of model parameters                        58
  Number of equality constraints                    23

  Number of observations per group:                   
    Group 2                                        179
    Group 3                                        176
  Number of missing patterns per group:               
    Group 2                                          2
    Group 3                                          2

Model Test User Model:
                                              Standard      Scaled
  Test Statistic                                85.974      75.731
  Degrees of freedom                                53          53
  P-value (Chi-square)                           0.003       0.022
  Scaling correction factor                                  1.135
    Yuan-Bentler correction (Mplus variant)                       
  Test statistic for each group:
    Group 2                                     53.793      47.384
    Group 3                                     32.181      28.347

Model Test Baseline Model:

  Test statistic                              1932.415    1344.826
  Degrees of freedom                                56          56
  P-value                                        0.000       0.000
  Scaling correction factor                                  1.437

User Model versus Baseline Model:

  Comparative Fit Index (CFI)                    0.982       0.982
  Tucker-Lewis Index (TLI)                       0.981       0.981
                                                                  
  Robust Comparative Fit Index (CFI)                         0.986
  Robust Tucker-Lewis Index (TLI)                            0.985

Loglikelihood and Information Criteria:

  Loglikelihood user model (H0)              -3354.947   -3354.947
  Scaling correction factor                                  0.757
      for the MLR correction                                      
  Loglikelihood unrestricted model (H1)      -3311.960   -3311.960
  Scaling correction factor                                  1.183
      for the MLR correction                                      
                                                                  
  Akaike (AIC)                                6779.893    6779.893
  Bayesian (BIC)                              6915.417    6915.417
  Sample-size adjusted Bayesian (SABIC)       6804.382    6804.382

Root Mean Square Error of Approximation:

  RMSEA                                          0.059       0.049
  90 Percent confidence interval - lower         0.035       0.022
  90 Percent confidence interval - upper         0.081       0.071
  P-value H_0: RMSEA <= 0.050                    0.242       0.502
  P-value H_0: RMSEA >= 0.080                    0.063       0.009
                                                                  
  Robust RMSEA                                               0.055
  90 Percent confidence interval - lower                     0.021
  90 Percent confidence interval - upper                     0.081
  P-value H_0: Robust RMSEA <= 0.050                         0.371
  P-value H_0: Robust RMSEA >= 0.080                         0.061

Standardized Root Mean Square Residual:

  SRMR                                           0.040       0.040

Parameter Estimates:

  Standard errors                             Sandwich
  Information bread                           Observed
  Observed information based on                Hessian


Group 1 [Group 2]:

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  swls_t1 =~                                                            
    s06_01_           1.000                               0.968    0.815
    s06_02_ (lmb2)    0.956    0.040   23.954    0.000    0.925    0.788
    s06_03_ (lmb3)    1.084    0.050   21.826    0.000    1.050    0.868
    s06_04_ (lmb4)    0.906    0.057   15.926    0.000    0.877    0.718
  swls_t2 =~                                                            
    s06_01_           1.000                               0.975    0.817
    s06_02_ (lmb2)    0.956    0.040   23.954    0.000    0.932    0.790
    s06_03_ (lmb3)    1.084    0.050   21.826    0.000    1.058    0.870
    s06_04_ (lmb4)    0.906    0.057   15.926    0.000    0.884    0.721
  d_swls_1 =~                                                           
    swls_t2           1.000                               0.511    0.511

Regressions:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  swls_t2 ~                                                             
    swls_t1           1.000                               0.993    0.993

Covariances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  swls_t1 ~~                                                            
    d_swl_1 (fb_a)   -0.117    0.050   -2.334    0.020   -0.243   -0.243
 .sw06_01_t1 ~~                                                         
   .s06_01_ (cov1)    0.190    0.058    3.268    0.001    0.190    0.400
 .sw06_02_t1 ~~                                                         
   .s06_02_ (cov2)    0.206    0.061    3.395    0.001    0.206    0.395
 .sw06_03_t1 ~~                                                         
   .s06_03_ (cov3)    0.016    0.054    0.292    0.770    0.016    0.044
 .sw06_04_t1 ~~                                                         
   .s06_04_ (cov4)    0.199    0.074    2.674    0.007    0.199    0.275

Intercepts:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .swls_t2           0.000                               0.000    0.000
    d_swl_1 (d_n_)    0.255    0.052    4.850    0.000    0.511    0.511
    swls_t1 (wb__)    2.546    0.087   29.366    0.000    2.630    2.630
   .s06_01_           0.000                               0.000    0.000
   .s06_02_  (m2a)    0.278    0.123    2.266    0.023    0.278    0.237
   .s06_03_  (m3a)    0.062    0.144    0.431    0.666    0.062    0.051
   .s06_04_  (m4a)    0.437    0.165    2.650    0.008    0.437    0.358
   .s06_01_           0.000                               0.000    0.000
   .s06_02_  (m2a)    0.278    0.123    2.266    0.023    0.278    0.236
   .s06_03_  (m3a)    0.062    0.144    0.431    0.666    0.062    0.051
   .s06_04_  (m4a)    0.437    0.165    2.650    0.008    0.437    0.356

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .swls_t2           0.000                               0.000    0.000
    d_swl_1 (d_v_)    0.248    0.055    4.490    0.000    1.000    1.000
    swls_t1 (wb__)    0.937    0.104    9.029    0.000    1.000    1.000
   .s06_01_ (res1)    0.474    0.063    7.529    0.000    0.474    0.336
   .s06_02_ (res2)    0.523    0.061    8.561    0.000    0.523    0.379
   .s06_03_ (res3)    0.361    0.055    6.528    0.000    0.361    0.247
   .s06_04_ (res4)    0.722    0.078    9.252    0.000    0.722    0.484
   .s06_01_ (res1)    0.474    0.063    7.529    0.000    0.474    0.333
   .s06_02_ (res2)    0.523    0.061    8.561    0.000    0.523    0.376
   .s06_03_ (res3)    0.361    0.055    6.528    0.000    0.361    0.244
   .s06_04_ (res4)    0.722    0.078    9.252    0.000    0.722    0.480


Group 2 [Group 3]:

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  swls_t1 =~                                                            
    s06_01_           1.000                               0.983    0.812
    s06_02_ (lmb2)    0.956    0.040   23.954    0.000    0.940    0.802
    s06_03_ (lmb3)    1.084    0.050   21.826    0.000    1.066    0.887
    s06_04_ (lmb4)    0.906    0.057   15.926    0.000    0.891    0.718
  swls_t2 =~                                                            
    s06_01_           1.000                               1.020    0.822
    s06_02_ (lmb2)    0.956    0.040   23.954    0.000    0.975    0.812
    s06_03_ (lmb3)    1.084    0.050   21.826    0.000    1.106    0.894
    s06_04_ (lmb4)    0.906    0.057   15.926    0.000    0.925    0.731
  d_swls_1 =~                                                           
    swls_t2           1.000                               0.556    0.556

Regressions:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  swls_t2 ~                                                             
    swls_t1           1.000                               0.964    0.964

Covariances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  swls_t1 ~~                                                            
    d_swl_1 (fb_b)   -0.124    0.069   -1.804    0.071   -0.222   -0.222
 .sw06_01_t1 ~~                                                         
   .s06_01_ (cv1b)    0.163    0.062    2.615    0.009    0.163    0.327
 .sw06_02_t1 ~~                                                         
   .s06_02_ (cv2b)    0.181    0.065    2.780    0.005    0.181    0.369
 .sw06_03_t1 ~~                                                         
   .s06_03_ (cv3b)    0.090    0.052    1.728    0.084    0.090    0.291
 .sw06_04_t1 ~~                                                         
   .s06_04_ (cv4b)    0.404    0.091    4.429    0.000    0.404    0.541

Intercepts:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .swls_t2           0.000                               0.000    0.000
    d_swl_1 (d_n_)    0.019    0.054    0.358    0.720    0.034    0.034
    swls_t1 (wb__)    2.914    0.089   32.652    0.000    2.963    2.963
   .s06_01_           0.000                               0.000    0.000
   .s06_02_  (m2b)    0.257    0.141    1.831    0.067    0.257    0.220
   .s06_03_  (m3b)   -0.048    0.163   -0.291    0.771   -0.048   -0.040
   .s06_04_  (m4b)    0.406    0.186    2.181    0.029    0.406    0.327
   .s06_01_           0.000                               0.000    0.000
   .s06_02_  (m2b)    0.257    0.141    1.831    0.067    0.257    0.214
   .s06_03_  (m3b)   -0.048    0.163   -0.291    0.771   -0.048   -0.038
   .s06_04_  (m4b)    0.406    0.186    2.181    0.029    0.406    0.321

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .swls_t2           0.000                               0.000    0.000
    d_swl_1 (d_v_)    0.322    0.105    3.053    0.002    1.000    1.000
    swls_t1 (wb__)    0.967    0.106    9.080    0.000    1.000    1.000
   .s06_01_ (rs1b)    0.498    0.071    7.022    0.000    0.498    0.340
   .s06_02_ (rs2b)    0.491    0.061    8.053    0.000    0.491    0.357
   .s06_03_ (rs3b)    0.309    0.063    4.919    0.000    0.309    0.214
   .s06_04_ (rs4b)    0.747    0.096    7.800    0.000    0.747    0.485
   .s06_01_ (rs1b)    0.498    0.071    7.022    0.000    0.498    0.324
   .s06_02_ (rs2b)    0.491    0.061    8.053    0.000    0.491    0.341
   .s06_03_ (rs3b)    0.309    0.063    4.919    0.000    0.309    0.201
   .s06_04_ (rs4b)    0.747    0.096    7.800    0.000    0.747    0.466
7.2.1.2 Meaning in life

Meaning in life: fitting multi-group models

Show the code
# adapt latent change score model from above and add grouping factor in estimation (also add vectorized equality constraints to the model)

# configural invariance -> model template created above
lcs_meaning_group_23_config <- sem(mi_lcs_meaning_group_config, data=df_sbsa3_wide_wb %>% filter(group!="Group 1"), estimator='mlr', 
                                fixed.x=FALSE, missing='fiml', group = "group")

# weak invariance -> model template created above
lcs_meaning_group_23_weak <- sem(mi_lcs_meaning_group_weak, data=df_sbsa3_wide_wb %>% 
                                   filter(group!="Group 1"), estimator='mlr', fixed.x=FALSE, missing='fiml', 
                                 group = "group", group.equal = "loadings")

# strong invariance -> model template created above
lcs_meaning_group_23_strong <- sem(mi_lcs_meaning_group_strong, data=df_sbsa3_wide_wb %>% 
                                     filter(group!="Group 1"), estimator='mlr', fixed.x=FALSE, missing='fiml', 
                                   group = "group", group.equal = c("intercepts", "loadings"))

Meaning in life: results

# model comparison tests for measurement invariance
lavTestLRT(lcs_meaning_group_23_config, lcs_meaning_group_23_weak, lcs_meaning_group_23_strong)

Scaled Chi-Squared Difference Test (method = "satorra.bentler.2001")

lavaan NOTE:
    The "Chisq" column contains standard test statistics, not the
    robust test that should be reported per model. A robust difference
    test is a function of two standard (not robust) statistics.
 
                            Df   AIC   BIC  Chisq Chisq diff Df diff Pr(>Chisq)
lcs_meaning_group_23_config 84 10324 10503 122.12                              
lcs_meaning_group_23_weak   88 10318 10481 124.09     2.0518       4     0.7262
lcs_meaning_group_23_strong 94 10317 10456 134.52    10.2897       6     0.1130
# show model with varying latent change parameters 
# -> key parameter is "d_meaning_1 ~1"
# labelled parameter as "d_int_a" & "d_int_b" (a = Group 2, b = Group 3)
params_lcs_meaning_group_23_weak <- broom::tidy(lcs_meaning_group_23_weak, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_wb_st3 %>% filter(trait=="meaning" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_meaning_1 ~1 ")) %>% mutate(sig_diff = " ")
kable(params_lcs_meaning_group_23_weak, digits = 3) 
term label estimate beta std.all statistic p.value sig_diff
d_meaning_1 ~1 d_int_a 0.079 0.053 0.094 0.993 0.321
d_meaning_1 ~1 d_int_b 0.007 0.005 0.009 0.100 0.920
# constrained to be equal in the strong measurement invariance model:
kable(broom::tidy(lcs_meaning_group_23_strong, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_wb_st3 %>% filter(trait=="meaning" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_meaning_1 ~1 ")), digits = 3) 
term label estimate beta std.all statistic p.value
d_meaning_1 ~1 d_int 0.038 0.026 0.045 0.704 0.481
d_meaning_1 ~1 d_int 0.038 0.026 0.048 0.704 0.481

Slightly more positive change in meaning in life in the Group 2 compared to Group 3 but no substantial differences according to the LRTs and not significantly different from zero in all models at p < .01.

# whole model (weak invariance)
summary(lcs_meaning_group_23_weak, fit.measures=TRUE, standardized=TRUE, rsquare=F)
lavaan 0.6.15 ended normally after 90 iterations

  Estimator                                         ML
  Optimization method                           NLMINB
  Number of model parameters                        72
  Number of equality constraints                    30

  Number of observations per group:                   
    Group 2                                        179
    Group 3                                        176
  Number of missing patterns per group:               
    Group 2                                          2
    Group 3                                          2

Model Test User Model:
                                              Standard      Scaled
  Test Statistic                               124.093      97.700
  Degrees of freedom                                88          88
  P-value (Chi-square)                           0.007       0.225
  Scaling correction factor                                  1.270
    Yuan-Bentler correction (Mplus variant)                       
  Test statistic for each group:
    Group 2                                     71.284      56.123
    Group 3                                     52.809      41.577

Model Test Baseline Model:

  Test statistic                              2927.980    1950.089
  Degrees of freedom                                90          90
  P-value                                        0.000       0.000
  Scaling correction factor                                  1.501

User Model versus Baseline Model:

  Comparative Fit Index (CFI)                    0.987       0.995
  Tucker-Lewis Index (TLI)                       0.987       0.995
                                                                  
  Robust Comparative Fit Index (CFI)                         0.996
  Robust Tucker-Lewis Index (TLI)                            0.996

Loglikelihood and Information Criteria:

  Loglikelihood user model (H0)              -5117.261   -5117.261
  Scaling correction factor                                  0.758
      for the MLR correction                                      
  Loglikelihood unrestricted model (H1)      -5055.215   -5055.215
  Scaling correction factor                                  1.280
      for the MLR correction                                      
                                                                  
  Akaike (AIC)                               10318.523   10318.523
  Bayesian (BIC)                             10481.152   10481.152
  Sample-size adjusted Bayesian (SABIC)      10347.910   10347.910

Root Mean Square Error of Approximation:

  RMSEA                                          0.048       0.025
  90 Percent confidence interval - lower         0.026       0.000
  90 Percent confidence interval - upper         0.067       0.047
  P-value H_0: RMSEA <= 0.050                    0.547       0.973
  P-value H_0: RMSEA >= 0.080                    0.002       0.000
                                                                  
  Robust RMSEA                                               0.027
  90 Percent confidence interval - lower                     0.000
  90 Percent confidence interval - upper                     0.057
  P-value H_0: Robust RMSEA <= 0.050                         0.881
  P-value H_0: Robust RMSEA >= 0.080                         0.000

Standardized Root Mean Square Residual:

  SRMR                                           0.040       0.040

Parameter Estimates:

  Standard errors                             Sandwich
  Information bread                           Observed
  Observed information based on                Hessian


Group 1 [Group 2]:

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  meaning_t1 =~                                                         
    m01_01_           1.000                               1.368    0.817
    m01_04_ (lmb2)    1.050    0.033   31.533    0.000    1.437    0.882
    m01_05_ (lmb3)    0.984    0.038   25.780    0.000    1.347    0.828
    m01_06_ (lmb4)    1.065    0.032   32.872    0.000    1.457    0.852
    m01_09_ (lmb5)   -1.043    0.051  -20.481    0.000   -1.427   -0.728
  meaning_t2 =~                                                         
    m01_01_           1.000                               1.339    0.811
    m01_04_ (lmb2)    1.050    0.033   31.533    0.000    1.406    0.878
    m01_05_ (lmb3)    0.984    0.038   25.780    0.000    1.318    0.822
    m01_06_ (lmb4)    1.065    0.032   32.872    0.000    1.426    0.846
    m01_09_ (lmb5)   -1.043    0.051  -20.481    0.000   -1.397   -0.721
  d_meaning_1 =~                                                        
    mnng_t2           1.000                               0.627    0.627

Regressions:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  meaning_t2 ~                                                          
    meaning_t1        1.000                               1.022    1.022

Covariances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  meaning_t1 ~~                                                         
    d_mnn_1 (fb_a)   -0.392    0.126   -3.102    0.002   -0.341   -0.341
 .ml01_01_t1 ~~                                                         
   .m01_01_ (cov1)    0.237    0.085    2.773    0.006    0.237    0.253
 .ml01_04_t1 ~~                                                         
   .m01_04_ (cov2)    0.009    0.092    0.103    0.918    0.009    0.016
 .ml01_05_t1 ~~                                                         
   .m01_05_ (cov3)    0.339    0.105    3.229    0.001    0.339    0.406
 .ml01_06_t1 ~~                                                         
   .m01_06_ (cov4)    0.174    0.084    2.059    0.039    0.174    0.216
 .ml01_09_t1 ~~                                                         
   .m01_09_ (cov5)    0.414    0.214    1.930    0.054    0.414    0.229

Intercepts:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .mnng_t2           0.000                               0.000    0.000
    d_mnn_1 (d_n_)    0.079    0.080    0.993    0.321    0.094    0.094
    mnng_t1 (wb__)    4.152    0.120   34.680    0.000    3.035    3.035
   .m01_01_           0.000                               0.000    0.000
   .m01_04_  (m2a)   -0.490    0.158   -3.099    0.002   -0.490   -0.301
   .m01_05_  (m3a)    0.501    0.195    2.565    0.010    0.501    0.308
   .m01_06_  (m4a)   -0.336    0.161   -2.083    0.037   -0.336   -0.197
   .m01_09_  (m5a)    8.130    0.231   35.143    0.000    8.130    4.148
   .m01_01_           0.000                               0.000    0.000
   .m01_04_  (m2a)   -0.490    0.158   -3.099    0.002   -0.490   -0.306
   .m01_05_  (m3a)    0.501    0.195    2.565    0.010    0.501    0.313
   .m01_06_  (m4a)   -0.336    0.161   -2.083    0.037   -0.336   -0.200
   .m01_09_  (m5a)    8.130    0.231   35.143    0.000    8.130    4.196

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .mnng_t2           0.000                               0.000    0.000
    d_mnn_1 (d_v_)    0.706    0.135    5.230    0.000    1.000    1.000
    mnng_t1 (wb__)    1.872    0.182   10.265    0.000    1.000    1.000
   .m01_01_ (res1)    0.936    0.104    9.030    0.000    0.936    0.333
   .m01_04_ (res2)    0.589    0.096    6.155    0.000    0.589    0.222
   .m01_05_ (res3)    0.833    0.109    7.658    0.000    0.833    0.315
   .m01_06_ (res4)    0.805    0.112    7.201    0.000    0.805    0.275
   .m01_09_ (res5)    1.805    0.237    7.617    0.000    1.805    0.470
   .m01_01_ (res1)    0.936    0.104    9.030    0.000    0.936    0.343
   .m01_04_ (res2)    0.589    0.096    6.155    0.000    0.589    0.230
   .m01_05_ (res3)    0.833    0.109    7.658    0.000    0.833    0.324
   .m01_06_ (res4)    0.805    0.112    7.201    0.000    0.805    0.284
   .m01_09_ (res5)    1.805    0.237    7.617    0.000    1.805    0.481


Group 2 [Group 3]:

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  meaning_t1 =~                                                         
    m01_01_           1.000                               1.426    0.878
    m01_04_ (lmb2)    1.050    0.033   31.533    0.000    1.497    0.903
    m01_05_ (lmb3)    0.984    0.038   25.780    0.000    1.404    0.852
    m01_06_ (lmb4)    1.065    0.032   32.872    0.000    1.518    0.881
    m01_09_ (lmb5)   -1.043    0.051  -20.481    0.000   -1.487   -0.750
  meaning_t2 =~                                                         
    m01_01_           1.000                               1.479    0.885
    m01_04_ (lmb2)    1.050    0.033   31.533    0.000    1.553    0.909
    m01_05_ (lmb3)    0.984    0.038   25.780    0.000    1.456    0.860
    m01_06_ (lmb4)    1.065    0.032   32.872    0.000    1.575    0.888
    m01_09_ (lmb5)   -1.043    0.051  -20.481    0.000   -1.543   -0.762
  d_meaning_1 =~                                                        
    mnng_t2           1.000                               0.540    0.540

Regressions:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  meaning_t2 ~                                                          
    meaning_t1        1.000                               0.964    0.964

Covariances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  meaning_t1 ~~                                                         
    d_mnn_1 (fb_b)   -0.242    0.103   -2.337    0.019   -0.212   -0.212
 .ml01_01_t1 ~~                                                         
   .m01_01_ (cv1b)    0.090    0.070    1.286    0.198    0.090    0.149
 .ml01_04_t1 ~~                                                         
   .m01_04_ (cv2b)   -0.049    0.072   -0.680    0.497   -0.049   -0.097
 .ml01_05_t1 ~~                                                         
   .m01_05_ (cv3b)    0.185    0.088    2.113    0.035    0.185    0.248
 .ml01_06_t1 ~~                                                         
   .m01_06_ (cv4b)    0.192    0.082    2.340    0.019    0.192    0.289
 .ml01_09_t1 ~~                                                         
   .m01_09_ (cv5b)    0.553    0.179    3.092    0.002    0.553    0.322

Intercepts:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .mnng_t2           0.000                               0.000    0.000
    d_mnn_1 (d_n_)    0.007    0.073    0.100    0.920    0.009    0.009
    mnng_t1 (wb__)    4.429    0.120   37.061    0.000    3.106    3.106
   .m01_01_           0.000                               0.000    0.000
   .m01_04_  (m2b)   -0.432    0.169   -2.562    0.010   -0.432   -0.261
   .m01_05_  (m3b)    0.341    0.194    1.755    0.079    0.341    0.207
   .m01_06_  (m4b)   -0.483    0.162   -2.982    0.003   -0.483   -0.280
   .m01_09_  (m5b)    8.256    0.243   34.034    0.000    8.256    4.166
   .m01_01_           0.000                               0.000    0.000
   .m01_04_  (m2b)   -0.432    0.169   -2.562    0.010   -0.432   -0.253
   .m01_05_  (m3b)    0.341    0.194    1.755    0.079    0.341    0.202
   .m01_06_  (m4b)   -0.483    0.162   -2.982    0.003   -0.483   -0.272
   .m01_09_  (m5b)    8.256    0.243   34.034    0.000    8.256    4.079

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .mnng_t2           0.000                               0.000    0.000
    d_mnn_1 (d_v_)    0.638    0.111    5.751    0.000    1.000    1.000
    mnng_t1 (wb__)    2.033    0.224    9.069    0.000    1.000    1.000
   .m01_01_ (rs1b)    0.605    0.078    7.791    0.000    0.605    0.229
   .m01_04_ (rs2b)    0.507    0.086    5.915    0.000    0.507    0.184
   .m01_05_ (rs3b)    0.746    0.116    6.436    0.000    0.746    0.275
   .m01_06_ (rs4b)    0.665    0.081    8.211    0.000    0.665    0.224
   .m01_09_ (rs5b)    1.717    0.216    7.941    0.000    1.717    0.437
   .m01_01_ (rs1b)    0.605    0.078    7.791    0.000    0.605    0.217
   .m01_04_ (rs2b)    0.507    0.086    5.915    0.000    0.507    0.174
   .m01_05_ (rs3b)    0.746    0.116    6.436    0.000    0.746    0.260
   .m01_06_ (rs4b)    0.665    0.081    8.211    0.000    0.665    0.211
   .m01_09_ (rs5b)    1.717    0.216    7.941    0.000    1.717    0.419
7.2.1.2.1 Search for Meaning

Search for Meaning in life (see preregistration deviations): fitting multi-group models

Show the code
# adapt latent change score model from above and add grouping factor in estimation (also add vectorized equality constraints to the model)

# configural invariance -> model template created above
lcs_search_group_23_config <- sem(mi_lcs_search_group_config, data=df_sbsa3_wide_wb %>% filter(group!="Group 1"), estimator='mlr', 
                                fixed.x=FALSE, missing='fiml', group = "group")

# weak invariance -> model template created above
lcs_search_group_23_weak <- sem(mi_lcs_search_group_weak, data=df_sbsa3_wide_wb %>% 
                                   filter(group!="Group 1"), estimator='mlr', fixed.x=FALSE, missing='fiml', 
                                 group = "group", group.equal = "loadings")

# strong invariance -> model template created above
lcs_search_group_23_strong <- sem(mi_lcs_search_group_strong, data=df_sbsa3_wide_wb %>% 
                                     filter(group!="Group 1"), estimator='mlr', fixed.x=FALSE, missing='fiml', 
                                   group = "group", group.equal = c("intercepts", "loadings"))

Meaning in life: results

# model comparison tests for measurement invariance
lavTestLRT(lcs_search_group_23_config, lcs_search_group_23_weak, lcs_search_group_23_strong)

Scaled Chi-Squared Difference Test (method = "satorra.bentler.2001")

lavaan NOTE:
    The "Chisq" column contains standard test statistics, not the
    robust test that should be reported per model. A robust difference
    test is a function of two standard (not robust) statistics.
 
                           Df   AIC   BIC  Chisq Chisq diff Df diff Pr(>Chisq)
lcs_search_group_23_config 84 10150 10328 243.50                              
lcs_search_group_23_weak   88 10161 10324 262.17    19.8749       4  0.0005286
lcs_search_group_23_strong 94 10153 10293 266.60     4.4316       6  0.6184816
                              
lcs_search_group_23_config    
lcs_search_group_23_weak   ***
lcs_search_group_23_strong    
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# show model with varying latent change parameters 
# -> key parameter is "d_search_1 ~1"
# labelled parameter as "d_int_a" & "d_int_b" (a = Group 2, b = Group 3)
params_lcs_search_group_23_weak <- broom::tidy(lcs_search_group_23_weak, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_wb_st3 %>% filter(trait=="search" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_search_1 ~1 ")) %>% mutate(sig_diff = " ")
kable(params_lcs_search_group_23_weak, digits = 3) 
term label estimate beta std.all statistic p.value sig_diff
d_search_1 ~1 d_int_a -0.151 -0.106 -0.166 -1.831 0.067
d_search_1 ~1 d_int_b -0.147 -0.104 -0.158 -1.786 0.074
# constrained to be equal in the strong measurement invariance model:
kable(broom::tidy(lcs_search_group_23_strong, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_wb_st3 %>% filter(trait=="search" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_search_1 ~1 ")), digits = 3) 
term label estimate beta std.all statistic p.value
d_search_1 ~1 d_int -0.149 -0.105 -0.165 -2.538 0.011
d_search_1 ~1 d_int -0.149 -0.105 -0.159 -2.538 0.011

Similar decreases in search for meaning in life in Group 2 compared to Group 3 but no substantial differences according to the LRTs and not significantly different from zero in all models at p < .01.

# whole model (weak invariance)
summary(lcs_meaning_group_23_weak, fit.measures=TRUE, standardized=TRUE, rsquare=F)
lavaan 0.6.15 ended normally after 90 iterations

  Estimator                                         ML
  Optimization method                           NLMINB
  Number of model parameters                        72
  Number of equality constraints                    30

  Number of observations per group:                   
    Group 2                                        179
    Group 3                                        176
  Number of missing patterns per group:               
    Group 2                                          2
    Group 3                                          2

Model Test User Model:
                                              Standard      Scaled
  Test Statistic                               124.093      97.700
  Degrees of freedom                                88          88
  P-value (Chi-square)                           0.007       0.225
  Scaling correction factor                                  1.270
    Yuan-Bentler correction (Mplus variant)                       
  Test statistic for each group:
    Group 2                                     71.284      56.123
    Group 3                                     52.809      41.577

Model Test Baseline Model:

  Test statistic                              2927.980    1950.089
  Degrees of freedom                                90          90
  P-value                                        0.000       0.000
  Scaling correction factor                                  1.501

User Model versus Baseline Model:

  Comparative Fit Index (CFI)                    0.987       0.995
  Tucker-Lewis Index (TLI)                       0.987       0.995
                                                                  
  Robust Comparative Fit Index (CFI)                         0.996
  Robust Tucker-Lewis Index (TLI)                            0.996

Loglikelihood and Information Criteria:

  Loglikelihood user model (H0)              -5117.261   -5117.261
  Scaling correction factor                                  0.758
      for the MLR correction                                      
  Loglikelihood unrestricted model (H1)      -5055.215   -5055.215
  Scaling correction factor                                  1.280
      for the MLR correction                                      
                                                                  
  Akaike (AIC)                               10318.523   10318.523
  Bayesian (BIC)                             10481.152   10481.152
  Sample-size adjusted Bayesian (SABIC)      10347.910   10347.910

Root Mean Square Error of Approximation:

  RMSEA                                          0.048       0.025
  90 Percent confidence interval - lower         0.026       0.000
  90 Percent confidence interval - upper         0.067       0.047
  P-value H_0: RMSEA <= 0.050                    0.547       0.973
  P-value H_0: RMSEA >= 0.080                    0.002       0.000
                                                                  
  Robust RMSEA                                               0.027
  90 Percent confidence interval - lower                     0.000
  90 Percent confidence interval - upper                     0.057
  P-value H_0: Robust RMSEA <= 0.050                         0.881
  P-value H_0: Robust RMSEA >= 0.080                         0.000

Standardized Root Mean Square Residual:

  SRMR                                           0.040       0.040

Parameter Estimates:

  Standard errors                             Sandwich
  Information bread                           Observed
  Observed information based on                Hessian


Group 1 [Group 2]:

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  meaning_t1 =~                                                         
    m01_01_           1.000                               1.368    0.817
    m01_04_ (lmb2)    1.050    0.033   31.533    0.000    1.437    0.882
    m01_05_ (lmb3)    0.984    0.038   25.780    0.000    1.347    0.828
    m01_06_ (lmb4)    1.065    0.032   32.872    0.000    1.457    0.852
    m01_09_ (lmb5)   -1.043    0.051  -20.481    0.000   -1.427   -0.728
  meaning_t2 =~                                                         
    m01_01_           1.000                               1.339    0.811
    m01_04_ (lmb2)    1.050    0.033   31.533    0.000    1.406    0.878
    m01_05_ (lmb3)    0.984    0.038   25.780    0.000    1.318    0.822
    m01_06_ (lmb4)    1.065    0.032   32.872    0.000    1.426    0.846
    m01_09_ (lmb5)   -1.043    0.051  -20.481    0.000   -1.397   -0.721
  d_meaning_1 =~                                                        
    mnng_t2           1.000                               0.627    0.627

Regressions:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  meaning_t2 ~                                                          
    meaning_t1        1.000                               1.022    1.022

Covariances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  meaning_t1 ~~                                                         
    d_mnn_1 (fb_a)   -0.392    0.126   -3.102    0.002   -0.341   -0.341
 .ml01_01_t1 ~~                                                         
   .m01_01_ (cov1)    0.237    0.085    2.773    0.006    0.237    0.253
 .ml01_04_t1 ~~                                                         
   .m01_04_ (cov2)    0.009    0.092    0.103    0.918    0.009    0.016
 .ml01_05_t1 ~~                                                         
   .m01_05_ (cov3)    0.339    0.105    3.229    0.001    0.339    0.406
 .ml01_06_t1 ~~                                                         
   .m01_06_ (cov4)    0.174    0.084    2.059    0.039    0.174    0.216
 .ml01_09_t1 ~~                                                         
   .m01_09_ (cov5)    0.414    0.214    1.930    0.054    0.414    0.229

Intercepts:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .mnng_t2           0.000                               0.000    0.000
    d_mnn_1 (d_n_)    0.079    0.080    0.993    0.321    0.094    0.094
    mnng_t1 (wb__)    4.152    0.120   34.680    0.000    3.035    3.035
   .m01_01_           0.000                               0.000    0.000
   .m01_04_  (m2a)   -0.490    0.158   -3.099    0.002   -0.490   -0.301
   .m01_05_  (m3a)    0.501    0.195    2.565    0.010    0.501    0.308
   .m01_06_  (m4a)   -0.336    0.161   -2.083    0.037   -0.336   -0.197
   .m01_09_  (m5a)    8.130    0.231   35.143    0.000    8.130    4.148
   .m01_01_           0.000                               0.000    0.000
   .m01_04_  (m2a)   -0.490    0.158   -3.099    0.002   -0.490   -0.306
   .m01_05_  (m3a)    0.501    0.195    2.565    0.010    0.501    0.313
   .m01_06_  (m4a)   -0.336    0.161   -2.083    0.037   -0.336   -0.200
   .m01_09_  (m5a)    8.130    0.231   35.143    0.000    8.130    4.196

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .mnng_t2           0.000                               0.000    0.000
    d_mnn_1 (d_v_)    0.706    0.135    5.230    0.000    1.000    1.000
    mnng_t1 (wb__)    1.872    0.182   10.265    0.000    1.000    1.000
   .m01_01_ (res1)    0.936    0.104    9.030    0.000    0.936    0.333
   .m01_04_ (res2)    0.589    0.096    6.155    0.000    0.589    0.222
   .m01_05_ (res3)    0.833    0.109    7.658    0.000    0.833    0.315
   .m01_06_ (res4)    0.805    0.112    7.201    0.000    0.805    0.275
   .m01_09_ (res5)    1.805    0.237    7.617    0.000    1.805    0.470
   .m01_01_ (res1)    0.936    0.104    9.030    0.000    0.936    0.343
   .m01_04_ (res2)    0.589    0.096    6.155    0.000    0.589    0.230
   .m01_05_ (res3)    0.833    0.109    7.658    0.000    0.833    0.324
   .m01_06_ (res4)    0.805    0.112    7.201    0.000    0.805    0.284
   .m01_09_ (res5)    1.805    0.237    7.617    0.000    1.805    0.481


Group 2 [Group 3]:

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  meaning_t1 =~                                                         
    m01_01_           1.000                               1.426    0.878
    m01_04_ (lmb2)    1.050    0.033   31.533    0.000    1.497    0.903
    m01_05_ (lmb3)    0.984    0.038   25.780    0.000    1.404    0.852
    m01_06_ (lmb4)    1.065    0.032   32.872    0.000    1.518    0.881
    m01_09_ (lmb5)   -1.043    0.051  -20.481    0.000   -1.487   -0.750
  meaning_t2 =~                                                         
    m01_01_           1.000                               1.479    0.885
    m01_04_ (lmb2)    1.050    0.033   31.533    0.000    1.553    0.909
    m01_05_ (lmb3)    0.984    0.038   25.780    0.000    1.456    0.860
    m01_06_ (lmb4)    1.065    0.032   32.872    0.000    1.575    0.888
    m01_09_ (lmb5)   -1.043    0.051  -20.481    0.000   -1.543   -0.762
  d_meaning_1 =~                                                        
    mnng_t2           1.000                               0.540    0.540

Regressions:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  meaning_t2 ~                                                          
    meaning_t1        1.000                               0.964    0.964

Covariances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  meaning_t1 ~~                                                         
    d_mnn_1 (fb_b)   -0.242    0.103   -2.337    0.019   -0.212   -0.212
 .ml01_01_t1 ~~                                                         
   .m01_01_ (cv1b)    0.090    0.070    1.286    0.198    0.090    0.149
 .ml01_04_t1 ~~                                                         
   .m01_04_ (cv2b)   -0.049    0.072   -0.680    0.497   -0.049   -0.097
 .ml01_05_t1 ~~                                                         
   .m01_05_ (cv3b)    0.185    0.088    2.113    0.035    0.185    0.248
 .ml01_06_t1 ~~                                                         
   .m01_06_ (cv4b)    0.192    0.082    2.340    0.019    0.192    0.289
 .ml01_09_t1 ~~                                                         
   .m01_09_ (cv5b)    0.553    0.179    3.092    0.002    0.553    0.322

Intercepts:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .mnng_t2           0.000                               0.000    0.000
    d_mnn_1 (d_n_)    0.007    0.073    0.100    0.920    0.009    0.009
    mnng_t1 (wb__)    4.429    0.120   37.061    0.000    3.106    3.106
   .m01_01_           0.000                               0.000    0.000
   .m01_04_  (m2b)   -0.432    0.169   -2.562    0.010   -0.432   -0.261
   .m01_05_  (m3b)    0.341    0.194    1.755    0.079    0.341    0.207
   .m01_06_  (m4b)   -0.483    0.162   -2.982    0.003   -0.483   -0.280
   .m01_09_  (m5b)    8.256    0.243   34.034    0.000    8.256    4.166
   .m01_01_           0.000                               0.000    0.000
   .m01_04_  (m2b)   -0.432    0.169   -2.562    0.010   -0.432   -0.253
   .m01_05_  (m3b)    0.341    0.194    1.755    0.079    0.341    0.202
   .m01_06_  (m4b)   -0.483    0.162   -2.982    0.003   -0.483   -0.272
   .m01_09_  (m5b)    8.256    0.243   34.034    0.000    8.256    4.079

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .mnng_t2           0.000                               0.000    0.000
    d_mnn_1 (d_v_)    0.638    0.111    5.751    0.000    1.000    1.000
    mnng_t1 (wb__)    2.033    0.224    9.069    0.000    1.000    1.000
   .m01_01_ (rs1b)    0.605    0.078    7.791    0.000    0.605    0.229
   .m01_04_ (rs2b)    0.507    0.086    5.915    0.000    0.507    0.184
   .m01_05_ (rs3b)    0.746    0.116    6.436    0.000    0.746    0.275
   .m01_06_ (rs4b)    0.665    0.081    8.211    0.000    0.665    0.224
   .m01_09_ (rs5b)    1.717    0.216    7.941    0.000    1.717    0.437
   .m01_01_ (rs1b)    0.605    0.078    7.791    0.000    0.605    0.217
   .m01_04_ (rs2b)    0.507    0.086    5.915    0.000    0.507    0.174
   .m01_05_ (rs3b)    0.746    0.116    6.436    0.000    0.746    0.260
   .m01_06_ (rs4b)    0.665    0.081    8.211    0.000    0.665    0.211
   .m01_09_ (rs5b)    1.717    0.216    7.941    0.000    1.717    0.419
7.2.1.3 Self-esteem

Self-esteem: fitting multi-group models

Show the code
# adapt latent change score model from above and add grouping factor in estimation (also add vectorized equality constraints to the model)

# configural invariance -> model template created above
lcs_selfes_group_23_config <- sem(mi_lcs_selfes_group_config, data=df_sbsa3_wide_wb %>% 
                                    filter(group!="Group 1"), estimator='mlr', 
                                fixed.x=FALSE, missing='fiml', group = "group")

# weak invariance -> model template created above
lcs_selfes_group_23_weak <- sem(mi_lcs_selfes_group_weak, data=df_sbsa3_wide_wb %>% 
                                  filter(group!="Group 1"), estimator='mlr', fixed.x=FALSE, missing='fiml', 
                                group = "group", group.equal = "loadings")

# strong invariance -> model template created above
lcs_selfes_group_23_strong <- sem(mi_lcs_selfes_group_strong, data=df_sbsa3_wide_wb %>% 
                                    filter(group!="Group 1"), estimator='mlr', fixed.x=FALSE, missing='fiml', 
                                  group = "group", group.equal = c("intercepts", "loadings"))

Self-esteem: results

# model comparison tests for measurement invariance
lavTestLRT(lcs_selfes_group_23_config, lcs_selfes_group_23_weak, lcs_selfes_group_23_strong)

Scaled Chi-Squared Difference Test (method = "satorra.bentler.2001")

lavaan NOTE:
    The "Chisq" column contains standard test statistics, not the
    robust test that should be reported per model. A robust difference
    test is a function of two standard (not robust) statistics.
 
                           Df    AIC    BIC  Chisq Chisq diff Df diff
lcs_selfes_group_23_config 24 3712.6 3828.7 23.270                   
lcs_selfes_group_23_weak   26 3711.2 3819.6 25.907     2.5162       2
lcs_selfes_group_23_strong 30 3719.4 3812.3 42.088    15.7913       4
                           Pr(>Chisq)   
lcs_selfes_group_23_config              
lcs_selfes_group_23_weak     0.284188   
lcs_selfes_group_23_strong   0.003312 **
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# show model with varying latent change parameters 
# -> key parameter is "d_selfes_1 ~1"
# labelled parameter as "d_int_a" & "d_int_b" (a = Group 2, b = Group 3)
params_lcs_selfes_group_23_weak <- broom::tidy(lcs_selfes_group_23_weak, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_wb_st3 %>% filter(trait=="selfes" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_selfes_1 ~1 ")) %>% mutate(sig_diff = "**")
kable(params_lcs_selfes_group_23_weak, digits = 3)
term label estimate beta std.all statistic p.value sig_diff
d_selfes_1 ~1 d_int_a 0.164 0.185 0.399 4.161 0.000 **
d_selfes_1 ~1 d_int_b 0.009 0.011 0.028 0.283 0.777 **
# constrained to be equal in the strong measurement invariance model:
kable(broom::tidy(lcs_selfes_group_23_strong, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_wb_st3 %>% filter(trait=="selfes" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_selfes_1 ~1 ")), digits = 3) 
term label estimate beta std.all statistic p.value
d_selfes_1 ~1 d_int 0.07 0.079 0.166 2.517 0.012
d_selfes_1 ~1 d_int 0.07 0.079 0.204 2.517 0.012

Significant positive change in self-esteem in the Group 2 but no change in Grouo 3. Significant difference according to the LRT.

# whole model (weak invariance)
summary(lcs_selfes_group_23_weak, fit.measures=TRUE, standardized=TRUE, rsquare=F)
lavaan 0.6.15 ended normally after 69 iterations

  Estimator                                         ML
  Optimization method                           NLMINB
  Number of model parameters                        44
  Number of equality constraints                    16

  Number of observations per group:                   
    Group 2                                        179
    Group 3                                        176
  Number of missing patterns per group:               
    Group 2                                          2
    Group 3                                          2

Model Test User Model:
                                              Standard      Scaled
  Test Statistic                                25.907      24.547
  Degrees of freedom                                26          26
  P-value (Chi-square)                           0.468       0.545
  Scaling correction factor                                  1.055
    Yuan-Bentler correction (Mplus variant)                       
  Test statistic for each group:
    Group 2                                     16.497      15.631
    Group 3                                      9.409       8.915

Model Test Baseline Model:

  Test statistic                              1951.407    1562.615
  Degrees of freedom                                30          30
  P-value                                        0.000       0.000
  Scaling correction factor                                  1.249

User Model versus Baseline Model:

  Comparative Fit Index (CFI)                    1.000       1.000
  Tucker-Lewis Index (TLI)                       1.000       1.001
                                                                  
  Robust Comparative Fit Index (CFI)                         1.000
  Robust Tucker-Lewis Index (TLI)                            1.001

Loglikelihood and Information Criteria:

  Loglikelihood user model (H0)              -1827.611   -1827.611
  Scaling correction factor                                  0.686
      for the MLR correction                                      
  Loglikelihood unrestricted model (H1)      -1814.658   -1814.658
  Scaling correction factor                                  1.067
      for the MLR correction                                      
                                                                  
  Akaike (AIC)                                3711.222    3711.222
  Bayesian (BIC)                              3819.641    3819.641
  Sample-size adjusted Bayesian (SABIC)       3730.813    3730.813

Root Mean Square Error of Approximation:

  RMSEA                                          0.000       0.000
  90 Percent confidence interval - lower         0.000       0.000
  90 Percent confidence interval - upper         0.059       0.054
  P-value H_0: RMSEA <= 0.050                    0.890       0.926
  P-value H_0: RMSEA >= 0.080                    0.003       0.001
                                                                  
  Robust RMSEA                                               0.000
  90 Percent confidence interval - lower                     0.000
  90 Percent confidence interval - upper                     0.060
  P-value H_0: Robust RMSEA <= 0.050                         0.888
  P-value H_0: Robust RMSEA >= 0.080                         0.005

Standardized Root Mean Square Residual:

  SRMR                                           0.033       0.033

Parameter Estimates:

  Standard errors                             Sandwich
  Information bread                           Observed
  Observed information based on                Hessian


Group 1 [Group 2]:

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  selfes_t1 =~                                                          
    slf_1_1           1.000                               0.726    0.874
    slf_2_1 (lmb2)    1.164    0.037   31.311    0.000    0.846    0.911
    slf_3_1 (lmb3)    1.145    0.048   23.709    0.000    0.832    0.795
  selfes_t2 =~                                                          
    slf_1_2           1.000                               0.741    0.878
    slf_2_2 (lmb2)    1.164    0.037   31.311    0.000    0.862    0.914
    slf_3_2 (lmb3)    1.145    0.048   23.709    0.000    0.849    0.800
  d_selfes_1 =~                                                         
    slfs_t2           1.000                               0.554    0.554

Regressions:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  selfes_t2 ~                                                           
    selfes_t1         1.000                               0.980    0.980

Covariances:
                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  selfes_t1 ~~                                                           
    d_slf_1 (fb_a)    -0.074    0.033   -2.248    0.025   -0.247   -0.247
 .selfes_par1_t1 ~~                                                      
   .slf_1_2 (cov1)     0.061    0.024    2.542    0.011    0.061    0.375
 .selfes_par2_t1 ~~                                                      
   .slf_2_2 (cov2)     0.059    0.028    2.129    0.033    0.059    0.402
 .selfes_par3_t1 ~~                                                      
   .slf_3_2 (cov3)     0.104    0.040    2.579    0.010    0.104    0.257

Intercepts:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .slfs_t2           0.000                               0.000    0.000
    d_slf_1 (d_n_)    0.164    0.039    4.161    0.000    0.399    0.399
    slfs_t1 (wb__)    3.421    0.062   55.486    0.000    4.709    4.709
   .slf_1_1           0.000                               0.000    0.000
   .slf_2_1  (m2a)   -0.805    0.140   -5.738    0.000   -0.805   -0.867
   .slf_3_1  (m3a)   -1.184    0.183   -6.466    0.000   -1.184   -1.130
   .slf_1_2           0.000                               0.000    0.000
   .slf_2_2  (m2a)   -0.805    0.140   -5.738    0.000   -0.805   -0.853
   .slf_3_2  (m3a)   -1.184    0.183   -6.466    0.000   -1.184   -1.116

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .slfs_t2           0.000                               0.000    0.000
    d_slf_1 (d_v_)    0.169    0.035    4.826    0.000    1.000    1.000
    slfs_t1 (wb__)    0.528    0.060    8.822    0.000    1.000    1.000
   .slf_1_1 (res1)    0.163    0.023    7.103    0.000    0.163    0.236
   .slf_2_1 (res2)    0.147    0.026    5.753    0.000    0.147    0.171
   .slf_3_1 (res3)    0.404    0.041    9.790    0.000    0.404    0.368
   .slf_1_2 (res1)    0.163    0.023    7.103    0.000    0.163    0.229
   .slf_2_2 (res2)    0.147    0.026    5.753    0.000    0.147    0.165
   .slf_3_2 (res3)    0.404    0.041    9.790    0.000    0.404    0.359


Group 2 [Group 3]:

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  selfes_t1 =~                                                          
    slf_1_1           1.000                               0.812    0.899
    slf_2_1 (lmb2)    1.164    0.037   31.311    0.000    0.945    0.945
    slf_3_1 (lmb3)    1.145    0.048   23.709    0.000    0.930    0.846
  selfes_t2 =~                                                          
    slf_1_2           1.000                               0.863    0.909
    slf_2_2 (lmb2)    1.164    0.037   31.311    0.000    1.005    0.951
    slf_3_2 (lmb3)    1.145    0.048   23.709    0.000    0.989    0.860
  d_selfes_1 =~                                                         
    slfs_t2           1.000                               0.390    0.390

Regressions:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  selfes_t2 ~                                                           
    selfes_t1         1.000                               0.940    0.940

Covariances:
                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  selfes_t1 ~~                                                           
    d_slf_1 (fb_b)    -0.014    0.029   -0.480    0.631   -0.050   -0.050
 .selfes_par1_t1 ~~                                                      
   .slf_1_2 (cv1b)     0.057    0.020    2.857    0.004    0.057    0.365
 .selfes_par2_t1 ~~                                                      
   .slf_2_2 (cv2b)     0.025    0.020    1.256    0.209    0.025    0.232
 .selfes_par3_t1 ~~                                                      
   .slf_3_2 (cv3b)     0.159    0.039    4.102    0.000    0.159    0.461

Intercepts:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .slfs_t2           0.000                               0.000    0.000
    d_slf_1 (d_n_)    0.009    0.033    0.283    0.777    0.028    0.028
    slfs_t1 (wb__)    3.709    0.067   55.648    0.000    4.568    4.568
   .slf_1_1           0.000                               0.000    0.000
   .slf_2_1  (m2b)   -0.824    0.147   -5.598    0.000   -0.824   -0.824
   .slf_3_1  (m3b)   -1.207    0.188   -6.419    0.000   -1.207   -1.098
   .slf_1_2           0.000                               0.000    0.000
   .slf_2_2  (m2b)   -0.824    0.147   -5.598    0.000   -0.824   -0.780
   .slf_3_2  (m3b)   -1.207    0.188   -6.419    0.000   -1.207   -1.050

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .slfs_t2           0.000                               0.000    0.000
    d_slf_1 (d_v_)    0.114    0.029    3.872    0.000    1.000    1.000
    slfs_t1 (wb__)    0.659    0.066   10.042    0.000    1.000    1.000
   .slf_1_1 (rs1b)    0.157    0.021    7.430    0.000    0.157    0.192
   .slf_2_1 (rs2b)    0.108    0.022    4.867    0.000    0.108    0.108
   .slf_3_1 (rs3b)    0.345    0.038    9.122    0.000    0.345    0.285
   .slf_1_2 (rs1b)    0.157    0.021    7.430    0.000    0.157    0.174
   .slf_2_2 (rs2b)    0.108    0.022    4.867    0.000    0.108    0.096
   .slf_3_2 (rs3b)    0.345    0.038    9.122    0.000    0.345    0.261
7.2.1.4 Self-concept clarity

Self-concept clarity: fitting multi-group models

Show the code
# adapt latent change score model from above and add grouping factor in estimation (also add vectorized equality constraints to the model)

# configural invariance -> model template created above
lcs_concept_group_23_config <- sem(mi_lcs_concept_group_config, data=df_sbsa3_wide_wb %>% 
                                     filter(group!="Group 1"), estimator='mlr', 
                                   fixed.x=FALSE, missing='fiml', group = "group")

# weak invariance -> model template created above
lcs_concept_group_23_weak <- sem(mi_lcs_concept_group_weak, data=df_sbsa3_wide_wb %>% 
                                   filter(group!="Group 1"), estimator='mlr', fixed.x=FALSE, missing='fiml', 
                                 group = "group", group.equal = "loadings")

# strong invariance -> model template created above
lcs_concept_group_23_strong <- sem(mi_lcs_concept_group_strong, data=df_sbsa3_wide_wb %>% 
                                     filter(group!="Group 1"), estimator='mlr', fixed.x=FALSE, missing='fiml',
                                   group = "group", group.equal = c("intercepts", "loadings"))

Self-concept clarity: results

# model comparison tests for measurement invariance
lavTestLRT(lcs_concept_group_23_config, lcs_concept_group_23_weak, lcs_concept_group_23_strong)

Scaled Chi-Squared Difference Test (method = "satorra.bentler.2001")

lavaan NOTE:
    The "Chisq" column contains standard test statistics, not the
    robust test that should be reported per model. A robust difference
    test is a function of two standard (not robust) statistics.
 
                            Df    AIC    BIC  Chisq Chisq diff Df diff
lcs_concept_group_23_config 24 3897.8 4014.0 27.042                   
lcs_concept_group_23_weak   26 3896.1 4004.5 29.306     2.2311       2
lcs_concept_group_23_strong 30 3896.8 3989.7 38.030     8.5761       4
                            Pr(>Chisq)  
lcs_concept_group_23_config             
lcs_concept_group_23_weak      0.32773  
lcs_concept_group_23_strong    0.07261 .
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# show model with varying latent change parameters 
# -> key parameter is "d_concept_1 ~1"
# labelled parameter as "d_int_a" & "d_int_b" (a = Group 2, b = Group 3)
params_lcs_concept_group_23_weak <- broom::tidy(lcs_concept_group_23_weak, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_wb_st3 %>% filter(trait=="concept" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_concept_1 ~1 ")) %>% mutate(sig_diff = " ")
kable(params_lcs_concept_group_23_weak, digits = 3) 
term label estimate beta std.all statistic p.value sig_diff
d_concept_1 ~1 d_int_a 0.076 0.087 0.179 1.888 0.059
d_concept_1 ~1 d_int_b 0.076 0.087 0.198 2.127 0.033
# constrained to be equal in the strong measurement invariance model:
kable(broom::tidy(lcs_concept_group_23_strong, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_wb_st3 %>% filter(trait=="concept" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_concept_1 ~1 ")), digits = 3) 
term label estimate beta std.all statistic p.value
d_concept_1 ~1 d_int 0.077 0.088 0.181 2.855 0.004
d_concept_1 ~1 d_int 0.077 0.088 0.201 2.855 0.004

Only very small positive change in self-concept clarity both groups (very similar in the two groups), that is n.s. in the weak invariance model but significant in the strong invariance model. No substantial differences according to the LRTs.

# whole model (weak invariance)
summary(lcs_concept_group_23_weak, fit.measures=TRUE, standardized=TRUE, rsquare=F)
lavaan 0.6.15 ended normally after 62 iterations

  Estimator                                         ML
  Optimization method                           NLMINB
  Number of model parameters                        44
  Number of equality constraints                    16

  Number of observations per group:                   
    Group 2                                        179
    Group 3                                        176
  Number of missing patterns per group:               
    Group 2                                          2
    Group 3                                          2

Model Test User Model:
                                              Standard      Scaled
  Test Statistic                                29.306      28.349
  Degrees of freedom                                26          26
  P-value (Chi-square)                           0.297       0.341
  Scaling correction factor                                  1.034
    Yuan-Bentler correction (Mplus variant)                       
  Test statistic for each group:
    Group 2                                      9.046       8.751
    Group 3                                     20.260      19.599

Model Test Baseline Model:

  Test statistic                              1705.074    1407.968
  Degrees of freedom                                30          30
  P-value                                        0.000       0.000
  Scaling correction factor                                  1.211

User Model versus Baseline Model:

  Comparative Fit Index (CFI)                    0.998       0.998
  Tucker-Lewis Index (TLI)                       0.998       0.998
                                                                  
  Robust Comparative Fit Index (CFI)                         0.998
  Robust Tucker-Lewis Index (TLI)                            0.998

Loglikelihood and Information Criteria:

  Loglikelihood user model (H0)              -1920.043   -1920.043
  Scaling correction factor                                  0.671
      for the MLR correction                                      
  Loglikelihood unrestricted model (H1)      -1905.390   -1905.390
  Scaling correction factor                                  1.045
      for the MLR correction                                      
                                                                  
  Akaike (AIC)                                3896.086    3896.086
  Bayesian (BIC)                              4004.505    4004.505
  Sample-size adjusted Bayesian (SABIC)       3915.677    3915.677

Root Mean Square Error of Approximation:

  RMSEA                                          0.027       0.023
  90 Percent confidence interval - lower         0.000       0.000
  90 Percent confidence interval - upper         0.067       0.064
  P-value H_0: RMSEA <= 0.050                    0.791       0.827
  P-value H_0: RMSEA >= 0.080                    0.010       0.007
                                                                  
  Robust RMSEA                                               0.025
  90 Percent confidence interval - lower                     0.000
  90 Percent confidence interval - upper                     0.071
  P-value H_0: Robust RMSEA <= 0.050                         0.771
  P-value H_0: Robust RMSEA >= 0.080                         0.018

Standardized Root Mean Square Residual:

  SRMR                                           0.039       0.039

Parameter Estimates:

  Standard errors                             Sandwich
  Information bread                           Observed
  Observed information based on                Hessian


Group 1 [Group 2]:

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  concept_t1 =~                                                         
    cnc_1_1           1.000                               0.668    0.794
    cnc_2_1 (lmb2)    1.257    0.053   23.678    0.000    0.839    0.852
    cnc_3_1 (lmb3)    1.290    0.055   23.296    0.000    0.861    0.913
  concept_t2 =~                                                         
    cnc_1_2           1.000                               0.679    0.799
    cnc_2_2 (lmb2)    1.257    0.053   23.678    0.000    0.853    0.855
    cnc_3_2 (lmb3)    1.290    0.055   23.296    0.000    0.876    0.916
  d_concept_1 =~                                                        
    cncpt_2           1.000                               0.624    0.624

Regressions:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  concept_t2 ~                                                          
    concept_t1        1.000                               0.983    0.983

Covariances:
                     Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  concept_t1 ~~                                                           
    d_cnc_1 (fb_a)     -0.082    0.027   -3.039    0.002   -0.291   -0.291
 .concept_par1_t1 ~~                                                      
   .cnc_1_2 (cov1)      0.077    0.027    2.850    0.004    0.077    0.296
 .concept_par2_t1 ~~                                                      
   .cnc_2_2 (cov2)      0.083    0.031    2.688    0.007    0.083    0.311
 .concept_par3_t1 ~~                                                      
   .cnc_3_2 (cov3)      0.049    0.029    1.650    0.099    0.049    0.328

Intercepts:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .cncpt_2           0.000                               0.000    0.000
    d_cnc_1 (d_n_)    0.076    0.040    1.888    0.059    0.179    0.179
    cncpt_1 (wb__)    2.922    0.059   49.484    0.000    4.377    4.377
   .cnc_1_1           0.000                               0.000    0.000
   .cnc_2_1  (m2a)   -0.658    0.163   -4.033    0.000   -0.658   -0.668
   .cnc_3_1  (m3a)   -0.660    0.171   -3.866    0.000   -0.660   -0.699
   .cnc_1_2           0.000                               0.000    0.000
   .cnc_2_2  (m2a)   -0.658    0.163   -4.033    0.000   -0.658   -0.660
   .cnc_3_2  (m3a)   -0.660    0.171   -3.866    0.000   -0.660   -0.690

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .cncpt_2           0.000                               0.000    0.000
    d_cnc_1 (d_v_)    0.179    0.031    5.762    0.000    1.000    1.000
    cncpt_1 (wb__)    0.446    0.054    8.327    0.000    1.000    1.000
   .cnc_1_1 (res1)    0.261    0.025   10.624    0.000    0.261    0.369
   .cnc_2_1 (res2)    0.267    0.035    7.540    0.000    0.267    0.275
   .cnc_3_1 (res3)    0.148    0.030    4.933    0.000    0.148    0.166
   .cnc_1_2 (res1)    0.261    0.025   10.624    0.000    0.261    0.362
   .cnc_2_2 (res2)    0.267    0.035    7.540    0.000    0.267    0.268
   .cnc_3_2 (res3)    0.148    0.030    4.933    0.000    0.148    0.162


Group 2 [Group 3]:

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  concept_t1 =~                                                         
    cnc_1_1           1.000                               0.756    0.839
    cnc_2_1 (lmb2)    1.257    0.053   23.678    0.000    0.950    0.896
    cnc_3_1 (lmb3)    1.290    0.055   23.296    0.000    0.975    0.912
  concept_t2 =~                                                         
    cnc_1_2           1.000                               0.754    0.838
    cnc_2_2 (lmb2)    1.257    0.053   23.678    0.000    0.947    0.895
    cnc_3_2 (lmb3)    1.290    0.055   23.296    0.000    0.972    0.911
  d_concept_1 =~                                                        
    cncpt_2           1.000                               0.508    0.508

Regressions:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  concept_t2 ~                                                          
    concept_t1        1.000                               1.003    1.003

Covariances:
                     Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  concept_t1 ~~                                                           
    d_cnc_1 (fb_b)     -0.075    0.029   -2.539    0.011   -0.258   -0.258
 .concept_par1_t1 ~~                                                      
   .cnc_1_2 (cv1b)      0.078    0.025    3.073    0.002    0.078    0.322
 .concept_par2_t1 ~~                                                      
   .cnc_2_2 (cv2b)      0.052    0.035    1.506    0.132    0.052    0.235
 .concept_par3_t1 ~~                                                      
   .cnc_3_2 (cv3b)      0.103    0.035    2.978    0.003    0.103    0.535

Intercepts:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .cncpt_2           0.000                               0.000    0.000
    d_cnc_1 (d_n_)    0.076    0.036    2.127    0.033    0.198    0.198
    cncpt_1 (wb__)    3.130    0.067   46.847    0.000    4.142    4.142
   .cnc_1_1           0.000                               0.000    0.000
   .cnc_2_1  (m2b)   -0.724    0.174   -4.170    0.000   -0.724   -0.683
   .cnc_3_1  (m3b)   -0.814    0.185   -4.391    0.000   -0.814   -0.761
   .cnc_1_2           0.000                               0.000    0.000
   .cnc_2_2  (m2b)   -0.724    0.174   -4.170    0.000   -0.724   -0.684
   .cnc_3_2  (m3b)   -0.814    0.185   -4.391    0.000   -0.814   -0.763

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .cncpt_2           0.000                               0.000    0.000
    d_cnc_1 (d_v_)    0.146    0.029    5.009    0.000    1.000    1.000
    cncpt_1 (wb__)    0.571    0.064    8.981    0.000    1.000    1.000
   .cnc_1_1 (rs1b)    0.241    0.027    8.778    0.000    0.241    0.296
   .cnc_2_1 (rs2b)    0.223    0.031    7.223    0.000    0.223    0.198
   .cnc_3_1 (rs3b)    0.193    0.034    5.721    0.000    0.193    0.169
   .cnc_1_2 (rs1b)    0.241    0.027    8.778    0.000    0.241    0.297
   .cnc_2_2 (rs2b)    0.223    0.031    7.223    0.000    0.223    0.199
   .cnc_3_2 (rs3b)    0.193    0.034    5.721    0.000    0.193    0.170

7.2.2 Latent change: differences in personality change

7.2.2.1 Run models

Adapt latent change score model from above (but without any moderation) and add grouping factor in estimation (adding vectorized equality constraints to the model step by step):

Show the code
# create templates: (same as those already created above)

# loop across 5 traits
for (i in 1:5) {
  item_nrs = b5_vars[[i]][[1]]
  short_name = str_trunc(names(b5_vars)[i], 5, ellipsis = "")
  # configural - current
  template_filled_config_current <- str_replace_all(trait_template_main_config, 
                                       c("trait" = short_name,
                                         "ind01" = paste0(short_name, "_curr_par1"), 
                                         "ind02" = paste0(short_name, "_curr_par2"), 
                                         "ind03" = paste0(short_name, "_curr_par3")))
  trait_main_fit_config_current <- lavaan(template_filled_config_current, 
                                          data = df_sbsa3_wide_pers %>% 
                                            filter(group!="Group 1"), 
                                          estimator='mlr', fixed.x=FALSE, missing='fiml', group = "group")
  eval(call("<-", as.name(paste0("lcs_", short_name, "_main_config_group_23")), template_filled_config_current))
  eval(call("<-", as.name(paste0("fit_lcs_", short_name, "_main_config_group_23")), trait_main_fit_config_current))
  # weak - current
  template_filled_weak_current <- str_replace_all(trait_template_main_weak, 
                                       c("trait" = short_name,
                                         "ind01" = paste0(short_name, "_curr_par1"), 
                                         "ind02" = paste0(short_name, "_curr_par2"), 
                                         "ind03" = paste0(short_name, "_curr_par3")))
  trait_main_fit_weak_current <- lavaan(template_filled_weak_current, 
                                          data = df_sbsa3_wide_pers %>% 
                                          filter(group!="Group 1"), 
                                          estimator='mlr', fixed.x=FALSE, missing='fiml', 
                                        group = "group", group.equal = "loadings")
  eval(call("<-", as.name(paste0("lcs_", short_name, "_main_weak_group_23")), template_filled_weak_current))
  eval(call("<-", as.name(paste0("fit_lcs_", short_name, "_main_weak_group_23")), trait_main_fit_weak_current))
  # strong - current
  template_filled_strong_current <- str_replace_all(trait_template_main_strong, 
                                       c("trait" = short_name,
                                         "ind01" = paste0(short_name, "_curr_par1"), 
                                         "ind02" = paste0(short_name, "_curr_par2"), 
                                         "ind03" = paste0(short_name, "_curr_par3")))
  trait_main_fit_strong_current <- lavaan(template_filled_strong_current, 
                                          data = df_sbsa3_wide_pers %>% 
                                            filter(group!="Group 1"), 
                                          estimator='mlr', fixed.x=FALSE, missing='fiml', 
                                          group = "group", group.equal = c("intercepts", "loadings"))
  eval(call("<-", as.name(paste0("lcs_", short_name, "_main_strong_group_23")), template_filled_strong_current))
  eval(call("<-", as.name(paste0("fit_lcs_", short_name, "_main_strong_group_23")), trait_main_fit_strong_current))
}
7.2.2.2 Current personality personality traits

Extraversion (current personality): results

# model comparison tests for measurement invariance
lavTestLRT(fit_lcs_extra_main_config_group_23, fit_lcs_extra_main_weak_group_23, fit_lcs_extra_main_strong_group_23)

Scaled Chi-Squared Difference Test (method = "satorra.bentler.2001")

lavaan NOTE:
    The "Chisq" column contains standard test statistics, not the
    robust test that should be reported per model. A robust difference
    test is a function of two standard (not robust) statistics.
 
                                   Df    AIC    BIC  Chisq Chisq diff Df diff
fit_lcs_extra_main_config_group_23 24 3313.7 3429.9 35.195                   
fit_lcs_extra_main_weak_group_23   26 3312.4 3420.9 37.936     2.9066       2
fit_lcs_extra_main_strong_group_23 30 3317.8 3410.7 51.292    12.9389       4
                                   Pr(>Chisq)  
fit_lcs_extra_main_config_group_23             
fit_lcs_extra_main_weak_group_23      0.23380  
fit_lcs_extra_main_strong_group_23    0.01158 *
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# show model with varying latent change parameters 
# -> key parameter is "d_extra_1 ~1"
# labelled parameter as "d_int_a" & "d_int_b" (a = Group 2, b = Group 3)
params_main_lcs_extra_group_23 <- broom::tidy(fit_lcs_extra_main_weak_group_23, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_pers_st3 %>% filter(trait=="extra" & reference=="current" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_extra_1 ~1 ")) %>% mutate(sig_diff = "*")
kable(params_main_lcs_extra_group_23, digits = 3) 
term label estimate beta std.all statistic p.value sig_diff
d_extra_1 ~1 d_int_a 0.091 0.119 0.284 2.694 0.007 *
d_extra_1 ~1 d_int_b -0.039 -0.051 -0.258 -1.530 0.126 *
# constrained to be equal in the strong measurement invariance model:
kable(broom::tidy(fit_lcs_extra_main_strong_group_23, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_pers_st3 %>% filter(trait=="extra" & reference=="current" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_extra_1 ~1 ")), digits = 3) 
term label estimate beta std.all statistic p.value
d_extra_1 ~1 d_int 0.004 0.005 0.012 0.19 0.849
d_extra_1 ~1 d_int 0.004 0.005 0.027 0.19 0.849
# model fit
kable(broom::glance(fit_lcs_extra_main_weak_group_23) %>% 
        select(nobs, npar, AIC, BIC, cfi, rmsea, srmr), digits = 3)
nobs npar AIC BIC cfi rmsea srmr
355 28 3312.443 3420.862 0.993 0.051 0.045

Small but significant change in extraversion in Group 2 but no significant change in Group 3 (in weak invariance model). No change in strong invariance model. According to the LRT, only weak invariance is given (at p < .05; strong at p < .01).

Agreeableness (current personality): results

# model comparison tests for measurement invariance
lavTestLRT(fit_lcs_agree_main_config_group_23, fit_lcs_agree_main_weak_group_23, fit_lcs_agree_main_strong_group_23)

Scaled Chi-Squared Difference Test (method = "satorra.bentler.2001")

lavaan NOTE:
    The "Chisq" column contains standard test statistics, not the
    robust test that should be reported per model. A robust difference
    test is a function of two standard (not robust) statistics.
 
                                   Df    AIC    BIC  Chisq Chisq diff Df diff
fit_lcs_agree_main_config_group_23 24 3226.1 3342.2 33.599                   
fit_lcs_agree_main_weak_group_23   26 3224.0 3332.4 35.570     2.0013       2
fit_lcs_agree_main_strong_group_23 30 3219.1 3312.1 38.679     2.9883       4
                                   Pr(>Chisq)
fit_lcs_agree_main_config_group_23           
fit_lcs_agree_main_weak_group_23       0.3676
fit_lcs_agree_main_strong_group_23     0.5598
# show model with varying latent change parameters 
# -> key parameter is "d_agree_1 ~1"
# labelled parameter as "d_int_a" & "d_int_b" (a = Group 2, b = Group 3)
params_main_lcs_agree_group_23 <- broom::tidy(fit_lcs_agree_main_weak_group_23, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_pers_st3 %>% filter(trait=="agree" & reference=="current" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_agree_1 ~1 ")) %>% mutate(sig_diff = " ")
kable(params_main_lcs_agree_group_23, digits = 3) 
term label estimate beta std.all statistic p.value sig_diff
d_agree_1 ~1 d_int_a 0.044 0.075 0.170 1.603 0.109
d_agree_1 ~1 d_int_b -0.007 -0.012 -0.049 -0.324 0.746
# constrained to be equal in the strong measurement invariance model:
kable(broom::tidy(fit_lcs_agree_main_strong_group_23, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_pers_st3 %>% filter(trait=="agree" & reference=="current" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_agree_1 ~1 ")), digits = 3) 
term label estimate beta std.all statistic p.value
d_agree_1 ~1 d_int 0.013 0.022 0.050 0.716 0.474
d_agree_1 ~1 d_int 0.013 0.022 0.087 0.716 0.474
# model fit
kable(broom::glance(fit_lcs_agree_main_weak_group_23) %>% 
        select(nobs, npar, AIC, BIC, cfi, rmsea, srmr), digits = 3)
nobs npar AIC BIC cfi rmsea srmr
355 28 3224.022 3332.442 0.992 0.046 0.04

No change in agreeableness in both groups (increase is a tiny bit larger in Group 2 compared to Group 3). No significant differences according to the LRTs.

Conscientiousness (current personality): results

# model comparison tests for measurement invariance
lavTestLRT(fit_lcs_consc_main_config_group_23, fit_lcs_consc_main_weak_group_23, fit_lcs_consc_main_strong_group_23)

Scaled Chi-Squared Difference Test (method = "satorra.bentler.2001")

lavaan NOTE:
    The "Chisq" column contains standard test statistics, not the
    robust test that should be reported per model. A robust difference
    test is a function of two standard (not robust) statistics.
 
                                   Df    AIC    BIC  Chisq Chisq diff Df diff
fit_lcs_consc_main_config_group_23 24 3093.7 3209.9 24.840                   
fit_lcs_consc_main_weak_group_23   26 3090.4 3198.8 25.499     0.7707       2
fit_lcs_consc_main_strong_group_23 30 3095.5 3188.4 38.635    12.7522       4
                                   Pr(>Chisq)  
fit_lcs_consc_main_config_group_23             
fit_lcs_consc_main_weak_group_23      0.68022  
fit_lcs_consc_main_strong_group_23    0.01255 *
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# show model with varying latent change parameters 
# -> key parameter is "d_consc_1 ~1"
# labelled parameter as "d_int_a" & "d_int_b" (a = Group 2, b = Group 3)
params_main_lcs_consc_group_23 <- broom::tidy(fit_lcs_consc_main_weak_group_23, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_pers_st3 %>% filter(trait=="consc" & reference=="current" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_consc_1 ~1 ")) %>% mutate(sig_diff = "*")
kable(params_main_lcs_consc_group_23, digits = 3) 
term label estimate beta std.all statistic p.value sig_diff
d_consc_1 ~1 d_int_a 0.068 0.090 0.278 2.389 0.017 *
d_consc_1 ~1 d_int_b 0.040 0.052 0.167 1.487 0.137 *
# constrained to be equal in the strong measurement invariance model:
kable(broom::tidy(fit_lcs_consc_main_strong_group_23, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_pers_st3 %>% filter(trait=="consc" & reference=="current" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_consc_1 ~1 ")), digits = 3) 
term label estimate beta std.all statistic p.value
d_consc_1 ~1 d_int 0.052 0.069 0.211 2.617 0.009
d_consc_1 ~1 d_int 0.052 0.069 0.219 2.617 0.009
# model fit
kable(broom::glance(fit_lcs_consc_main_weak_group_23) %>% 
        select(nobs, npar, AIC, BIC, cfi, rmsea, srmr), digits = 3)
nobs npar AIC BIC cfi rmsea srmr
355 28 3090.36 3198.779 1 0 0.029

Small positive change in conscientiousness in both groups. This increase is slightly larger in Group 2 compared to Group 3. No significant differences according to the LRTs (at p < .01, but strong invariance not given at p < .05).

Neuroticism (current personality): results

# model comparison tests for measurement invariance
lavTestLRT(fit_lcs_neuro_main_config_group_23, fit_lcs_neuro_main_weak_group_23, fit_lcs_neuro_main_strong_group_23)

Scaled Chi-Squared Difference Test (method = "satorra.bentler.2001")

lavaan NOTE:
    The "Chisq" column contains standard test statistics, not the
    robust test that should be reported per model. A robust difference
    test is a function of two standard (not robust) statistics.
 
                                   Df    AIC    BIC  Chisq Chisq diff Df diff
fit_lcs_neuro_main_config_group_23 24 3165.7 3281.8 38.800                   
fit_lcs_neuro_main_weak_group_23   26 3162.0 3270.5 39.162     0.3657       2
fit_lcs_neuro_main_strong_group_23 30 3170.2 3263.1 55.319    16.8305       4
                                   Pr(>Chisq)   
fit_lcs_neuro_main_config_group_23              
fit_lcs_neuro_main_weak_group_23     0.832882   
fit_lcs_neuro_main_strong_group_23   0.002085 **
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# show model with varying latent change parameters 
# -> key parameter is "d_neuro_1 ~1"
# labelled parameter as "d_int_a" & "d_int_b" (a = Group 2, b = Group 3)
params_main_lcs_neuro_group_23 <- broom::tidy(fit_lcs_neuro_main_weak_group_23, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_pers_st3 %>% filter(trait=="neuro" & reference=="current" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_neuro_1 ~1 ")) %>% mutate(sig_diff = "**")
kable(params_main_lcs_neuro_group_23, digits = 3) 
term label estimate beta std.all statistic p.value sig_diff
d_neuro_1 ~1 d_int_a -0.125 -0.141 -0.459 -4.268 0.000 **
d_neuro_1 ~1 d_int_b 0.018 0.020 0.059 0.578 0.563 **
# constrained to be equal in the strong measurement invariance model:
kable(broom::tidy(fit_lcs_neuro_main_strong_group_23, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_pers_st3 %>% filter(trait=="neuro" & reference=="current" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_neuro_1 ~1 ")), digits = 3) 
term label estimate beta std.all statistic p.value
d_neuro_1 ~1 d_int -0.055 -0.062 -0.194 -2.344 0.019
d_neuro_1 ~1 d_int -0.055 -0.062 -0.179 -2.344 0.019
# model fit
kable(broom::glance(fit_lcs_neuro_main_weak_group_23) %>% 
        select(nobs, npar, AIC, BIC, cfi, rmsea, srmr), digits = 3)
nobs npar AIC BIC cfi rmsea srmr
355 28 3162.035 3270.454 0.994 0.053 0.025

Significant decrease in neuroticism only in Group 2, no change in Group 3. Significant difference according to the LRT (strong invariance not given!).

Openness (current personality): results

# model comparison tests for measurement invariance
lavTestLRT(fit_lcs_openn_main_config_group_23, fit_lcs_openn_main_weak_group_23, fit_lcs_openn_main_strong_group_23)

Scaled Chi-Squared Difference Test (method = "satorra.bentler.2001")

lavaan NOTE:
    The "Chisq" column contains standard test statistics, not the
    robust test that should be reported per model. A robust difference
    test is a function of two standard (not robust) statistics.
 
                                   Df    AIC    BIC  Chisq Chisq diff Df diff
fit_lcs_openn_main_config_group_23 24 3290.0 3406.1 22.233                   
fit_lcs_openn_main_weak_group_23   26 3287.8 3396.3 24.097     1.9657       2
fit_lcs_openn_main_strong_group_23 30 3284.3 3377.2 28.526     4.2923       4
                                   Pr(>Chisq)
fit_lcs_openn_main_config_group_23           
fit_lcs_openn_main_weak_group_23       0.3742
fit_lcs_openn_main_strong_group_23     0.3679
# show model with varying latent change parameters 
# -> key parameter is "d_openn_1 ~1"
# labelled parameter as "d_int_a" & "d_int_b" (a = Group 2, b = Group 3)
params_main_lcs_openn_group_23 <- broom::tidy(fit_lcs_openn_main_weak_group_23, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_pers_st3 %>% filter(trait=="openn" & reference=="current" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_openn_1 ~1 ")) %>% mutate(sig_diff = " ")
kable(params_main_lcs_openn_group_23, digits = 3) 
term label estimate beta std.all statistic p.value sig_diff
d_openn_1 ~1 d_int_a 0.042 0.066 0.217 1.766 0.077
d_openn_1 ~1 d_int_b -0.001 -0.001 -0.013 -0.035 0.972
# constrained to be equal in the strong measurement invariance model:
kable(broom::tidy(fit_lcs_openn_main_strong_group_23, conf.int = TRUE, conf.level = 0.99) %>% 
        mutate(beta = estimate / 
           (sd_pers_st3 %>% filter(trait=="openn" & reference=="current" & time==1) %>% pull(sd_trait))) %>% 
        select(term, label, estimate, beta, std.all, statistic, p.value) %>% 
        filter(term %in% c("d_openn_1 ~1 ")), digits = 3) 
term label estimate beta std.all statistic p.value
d_openn_1 ~1 d_int 0.016 0.025 0.084 1.096 0.273
d_openn_1 ~1 d_int 0.016 0.025 0.313 1.096 0.273
# model fit
kable(broom::glance(fit_lcs_openn_main_weak_group_23) %>% 
        select(nobs, npar, AIC, BIC, cfi, rmsea, srmr), digits = 3)
nobs npar AIC BIC cfi rmsea srmr
355 28 3287.837 3396.256 1 0 0.04

Almost no change in current personality openness in both groups but slightly stronger increase in Group 2 compared to Group 3. No significant differences according to the LRTs, though.

7.3 Summary of latent main effects

7.3.1 Big Five

7.3.1.1 Group 1 vs. 2
params_main_summary_12 <- bind_rows(
  params_main_lcs_extra_group_12 %>% mutate(trait = names(b5_vars)[1], self = "current") %>% select(trait, self, group = label, estimate, beta, p.value, sig_diff),
  params_main_lcs_agree_group_12 %>% mutate(trait = names(b5_vars)[2], self = "current") %>% select(trait, self, group = label, estimate, beta, p.value, sig_diff),
  params_main_lcs_consc_group_12 %>% mutate(trait = names(b5_vars)[3], self = "current") %>% select(trait, self, group = label, estimate, beta, p.value, sig_diff),
  params_main_lcs_neuro_group_12 %>% mutate(trait = names(b5_vars)[4], self = "current") %>% select(trait, self, group = label, estimate, beta, p.value, sig_diff),
  params_main_lcs_openn_group_12 %>% mutate(trait = names(b5_vars)[5], self = "current") %>% select(trait, self, group = label, estimate, beta, p.value, sig_diff)) %>% mutate(group = ifelse(group=="d_int_a", "Group 1", "Group 2"))

kable(params_main_summary_12, digits = 3)
trait self group estimate beta p.value sig_diff
extraversion current Group 1 0.074 0.098 0.006
extraversion current Group 2 0.088 0.116 0.006
agreeableness current Group 1 0.051 0.087 0.084
agreeableness current Group 2 0.047 0.079 0.105
conscientiousness current Group 1 0.099 0.131 0.003
conscientiousness current Group 2 0.068 0.090 0.018
neuroticism current Group 1 -0.052 -0.059 0.151
neuroticism current Group 2 -0.128 -0.145 0.000
openness current Group 1 0.022 0.035 0.412
openness current Group 2 0.044 0.068 0.072
7.3.1.2 Group 2 vs. 3
params_main_summary_23 <- bind_rows(
  params_main_lcs_extra_group_23 %>% mutate(trait = names(b5_vars)[1], self = "current") %>% select(trait, self, group = label, estimate, beta, p.value, sig_diff),
  params_main_lcs_agree_group_23 %>% mutate(trait = names(b5_vars)[2], self = "current") %>% select(trait, self, group = label, estimate, beta, p.value, sig_diff),
  params_main_lcs_consc_group_23 %>% mutate(trait = names(b5_vars)[3], self = "current") %>% select(trait, self, group = label, estimate, beta, p.value, sig_diff),
  params_main_lcs_neuro_group_23 %>% mutate(trait = names(b5_vars)[4], self = "current") %>% select(trait, self, group = label, estimate, beta, p.value, sig_diff),
  params_main_lcs_openn_group_23 %>% mutate(trait = names(b5_vars)[5], self = "current") %>% select(trait, self, group = label, estimate, beta, p.value, sig_diff)) %>% mutate(group = ifelse(group=="d_int_a", "Group 2", "Group 3"))

kable(params_main_summary_23, digits = 3)
trait self group estimate beta p.value sig_diff
extraversion current Group 2 0.091 0.119 0.007 *
extraversion current Group 3 -0.039 -0.051 0.126 *
agreeableness current Group 2 0.044 0.075 0.109
agreeableness current Group 3 -0.007 -0.012 0.746
conscientiousness current Group 2 0.068 0.090 0.017 *
conscientiousness current Group 3 0.040 0.052 0.137 *
neuroticism current Group 2 -0.125 -0.141 0.000 **
neuroticism current Group 3 0.018 0.020 0.563 **
openness current Group 2 0.042 0.066 0.077
openness current Group 3 -0.001 -0.001 0.972

Group 2 estimates very similar across model comparisons!

7.3.2 Well-being

7.3.2.1 Group 1 vs. 2
params_main_summary_wb_12 <- bind_rows(
  params_lcs_swls_group_12_weak %>% mutate(trait = "Life Satisfaction") %>% select(trait, group = label, estimate, beta, p.value, sig_diff),
  params_lcs_meaning_group_12_weak %>% mutate(trait = "Meaning in Life") %>% select(trait, group = label, estimate, beta, p.value, sig_diff),
    params_lcs_search_group_12_weak %>% mutate(trait = "Search for Meaning") %>% select(trait, group = label, estimate, beta, p.value, sig_diff),
  params_lcs_selfes_group_12_weak %>% mutate(trait = "Self-Esteem") %>% select(trait, group = label, estimate, beta, p.value, sig_diff),
  params_lcs_concept_group_12_weak %>% mutate(trait = "Self-Concept Clarity") %>% select(trait, group = label, estimate, beta, p.value, sig_diff)) %>% mutate(group = ifelse(group=="d_int_a", "Group 1", "Group 2"))

kable(params_main_summary_wb_12, digits = 3)
trait group estimate beta p.value sig_diff
Life Satisfaction Group 1 0.187 0.186 0.001
Life Satisfaction Group 2 0.259 0.258 0.000
Meaning in Life Group 1 0.091 0.061 0.241
Meaning in Life Group 2 0.079 0.053 0.324
Search for Meaning Group 1 -0.144 -0.101 0.074
Search for Meaning Group 2 -0.147 -0.103 0.071
Self-Esteem Group 1 0.108 0.122 0.006 **
Self-Esteem Group 2 0.166 0.187 0.000 **
Self-Concept Clarity Group 1 0.159 0.182 0.000
Self-Concept Clarity Group 2 0.080 0.091 0.057
7.3.2.2 Group 2 vs. 3
params_main_summary_wb_23 <- bind_rows(
  params_lcs_swls_group_23_weak %>% mutate(trait = "Life Satisfaction") %>% select(trait, group = label, estimate, beta, p.value, sig_diff),
  params_lcs_meaning_group_23_weak %>% mutate(trait = "Meaning in Life") %>% select(trait, group = label, estimate, beta, p.value, sig_diff),  
  params_lcs_search_group_23_weak %>% mutate(trait = "Search for Meaning") %>% select(trait, group = label, estimate, beta, p.value, sig_diff),
  params_lcs_selfes_group_23_weak %>% mutate(trait = "Self-Esteem") %>% select(trait, group = label, estimate, beta, p.value, sig_diff),
  params_lcs_concept_group_23_weak %>% mutate(trait = "Self-Concept Clarity") %>% select(trait, group = label, estimate, beta, p.value, sig_diff)) %>% mutate(group = ifelse(group=="d_int_a", "Group 2", "Group 3"))

kable(params_main_summary_wb_23, digits = 3)
trait group estimate beta p.value sig_diff
Life Satisfaction Group 2 0.255 0.253 0.000 **
Life Satisfaction Group 3 0.019 0.019 0.720 **
Meaning in Life Group 2 0.079 0.053 0.321
Meaning in Life Group 3 0.007 0.005 0.920
Search for Meaning Group 2 -0.151 -0.106 0.067
Search for Meaning Group 3 -0.147 -0.104 0.074
Self-Esteem Group 2 0.164 0.185 0.000 **
Self-Esteem Group 3 0.009 0.011 0.777 **
Self-Concept Clarity Group 2 0.076 0.087 0.059
Self-Concept Clarity Group 3 0.076 0.087 0.033

Group 2 estimates very similar across model comparisons!

7.4 Plot Changes (Latent Scores)

7.4.1 Personality

Gather all latent intercepts at T1 and latent change scores:

Show the code
b5_latent_df_plot <- bind_rows(
  broom::tidy(fit_lcs_extra_main_weak_group_12, conf.int = T, conf.level = .99) %>% select(term, label, estimate, std.error, conf.low, conf.high) %>% 
    filter(term %in% c("extra_t1 ~1 ", "d_extra_1 ~1 ")) %>% 
    mutate(Trait = "Extraversion", Group = ifelse(label %in% c("d_int_a", "b5_int_a"), "Group 1:\nDiscrepancy awareness\n", "Group 2:\nDemand effects\n")),
  broom::tidy(fit_lcs_extra_main_weak_group_23, conf.int = T, conf.level = .99) %>% select(term, label, estimate, std.error, conf.low, conf.high) %>% 
    filter(term %in% c("extra_t1 ~1 ", "d_extra_1 ~1 ") & label %in% c("d_int_b", "b5_int_b")) %>% 
    mutate(Trait = "Extraversion", Group = "Group 3:\nDevelopmental trends\n"),
  broom::tidy(fit_lcs_agree_main_weak_group_12, conf.int = T, conf.level = .99) %>% select(term, label, estimate, std.error, conf.low, conf.high) %>% 
    filter(term %in% c("agree_t1 ~1 ", "d_agree_1 ~1 ")) %>% 
    mutate(Trait = "Agreeableness", Group = ifelse(label %in% c("d_int_a", "b5_int_a"), "Group 1:\nDiscrepancy awareness\n", "Group 2:\nDemand effects\n")),
  broom::tidy(fit_lcs_agree_main_weak_group_23, conf.int = T, conf.level = .99) %>% select(term, label, estimate, std.error, conf.low, conf.high) %>% 
    filter(term %in% c("agree_t1 ~1 ", "d_agree_1 ~1 ") & label %in% c("d_int_b", "b5_int_b")) %>% 
    mutate(Trait = "Agreeableness", Group = "Group 3:\nDevelopmental trends\n"),
  broom::tidy(fit_lcs_consc_main_weak_group_12, conf.int = T, conf.level = .99) %>% select(term, label, estimate, std.error, conf.low, conf.high) %>% 
    filter(term %in% c("consc_t1 ~1 ", "d_consc_1 ~1 ")) %>% 
    mutate(Trait = "Conscientiousness", Group = ifelse(label %in% c("d_int_a", "b5_int_a"), "Group 1:\nDiscrepancy awareness\n", "Group 2:\nDemand effects\n")),
  broom::tidy(fit_lcs_consc_main_weak_group_23, conf.int = T, conf.level = .99) %>% select(term, label, estimate, std.error, conf.low, conf.high) %>% 
    filter(term %in% c("consc_t1 ~1 ", "d_consc_1 ~1 ") & label %in% c("d_int_b", "b5_int_b")) %>% 
    mutate(Trait = "Conscientiousness", Group = "Group 3:\nDevelopmental trends\n"),
  broom::tidy(fit_lcs_neuro_main_weak_group_12, conf.int = T, conf.level = .99) %>% select(term, label, estimate, std.error, conf.low, conf.high) %>% 
    filter(term %in% c("neuro_t1 ~1 ", "d_neuro_1 ~1 ")) %>% 
    mutate(Trait = "Neuroticism", Group = ifelse(label %in% c("d_int_a", "b5_int_a"), "Group 1:\nDiscrepancy awareness\n", "Group 2:\nDemand effects\n")),
  broom::tidy(fit_lcs_neuro_main_weak_group_23, conf.int = T, conf.level = .99) %>% select(term, label, estimate, std.error, conf.low, conf.high) %>% 
    filter(term %in% c("neuro_t1 ~1 ", "d_neuro_1 ~1 ") & label %in% c("d_int_b", "b5_int_b")) %>% 
    mutate(Trait = "Neuroticism", Group = "Group 3:\nDevelopmental trends\n"),
  broom::tidy(fit_lcs_openn_main_weak_group_12, conf.int = T, conf.level = .99) %>% select(term, label, estimate, std.error, conf.low, conf.high) %>% 
    filter(term %in% c("openn_t1 ~1 ", "d_openn_1 ~1 ")) %>% 
    mutate(Trait = "Openness", Group = ifelse(label %in% c("d_int_a", "b5_int_a"), "Group 1:\nDiscrepancy awareness\n", "Group 2:\nDemand effects\n")),
  broom::tidy(fit_lcs_openn_main_weak_group_23, conf.int = T, conf.level = .99) %>% select(term, label, estimate, std.error, conf.low, conf.high) %>% 
    filter(term %in% c("openn_t1 ~1 ", "d_openn_1 ~1 ") & label %in% c("d_int_b", "b5_int_b")) %>% 
    mutate(Trait = "Openness", Group = "Group 3:\nDevelopmental trends\n"))

# select and mutate relevant vars
b5_latent_df_plot <- b5_latent_df_plot %>% 
  mutate(param = ifelse(label %in% c("d_int_a", "d_int_b"), "slope", "intercept")) %>% 
  select(Trait, Group, estimate, std.error, param, conf.low, conf.high) %>% 
  pivot_wider(names_from = param,
              names_sep = "_",
              values_from = -c(Trait, Group, param)) %>% 
  mutate(T2_estimate = estimate_intercept + estimate_slope, T1_estimate = estimate_intercept,
         T2_cilow = T2_estimate - qnorm(0.995)*(sqrt(std.error_intercept^2 + std.error_slope^2)), 
         T2_cihi  = T2_estimate + qnorm(0.995)*(sqrt(std.error_intercept^2 + std.error_slope^2))) %>% 
  rename(T1_cilow = conf.low_intercept, T1_cihi = conf.high_intercept) %>% 
  select(-c(estimate_slope, estimate_intercept, conf.low_slope, conf.high_slope, std.error_slope, std.error_intercept))

# reshape
b5_latent_df_plot <- b5_latent_df_plot %>% 
  pivot_longer(-c(Trait, Group),
               names_to = c("Time", ".value"),
               names_pattern = "T(\\d)_(.*)") %>% 
  mutate(Time = factor(str_c("T", Time), levels = c("T1", "T2"), labels = c("T1", "T2"), ordered = T))

Plot changes (unstandardized effect size metric): all together

Show the code
ggplot(b5_latent_df_plot, 
       aes(y = estimate, x = Time)) + 
  geom_point(aes(shape = Group, group = Group, color = Group), position = position_dodge(0.3)) +
  geom_line(aes(group = Group, color = Group, linetype = Group), position = position_dodge(0.3)) +
  scale_shape_manual(values=c(17, 19, 15)) + 
  geom_errorbar(aes(group = Group, color = Group, ymin = cilow, ymax = cihi), width=.3, position=position_dodge(0.3)) + 
  ylab("Mean Trait Level\n(Model-Based, 99% CI)") + 
  ylim(2.5, 4.5) + 
  facet_wrap( ~ Trait, ncol = 3) +
  theme_bw() +
  theme(panel.grid.minor = element_blank(),
        strip.background=element_rect(fill="grey93"))

Plotting model based mean-level changes: Extraversion

Show the code
ggplot(b5_latent_df_plot %>% filter(Trait=="Extraversion"), 
       aes(y = estimate, x = Time)) + 
  geom_point(aes(shape = Group, group = Group, color = Group), position = position_dodge(0.3)) +
  geom_line(aes(group = Group, color = Group, linetype = Group), position = position_dodge(0.3)) +
  scale_shape_manual(values=c(17, 19, 15)) + 
  geom_errorbar(aes(group = Group, color = Group, ymin = cilow, ymax = cihi), width=.3, position=position_dodge(0.3)) + 
  ylab("Mean Trait Level\n(Model-Based, 99% CI)") + 
  ylim(2.5, 3.5) + 
  theme_bw() +
  theme(panel.grid.minor = element_blank(),
        strip.background=element_rect(fill="grey93"))

Plotting model based mean-level changes: Agreeableness

Show the code
ggplot(b5_latent_df_plot %>% filter(Trait=="Agreeableness"), 
       aes(y = estimate, x = Time)) + 
  geom_point(aes(shape = Group, group = Group, color = Group), position = position_dodge(0.3)) +
  geom_line(aes(group = Group, color = Group, linetype = Group), position = position_dodge(0.3)) +
  scale_shape_manual(values=c(17, 19, 15)) + 
  geom_errorbar(aes(group = Group, color = Group, ymin = cilow, ymax = cihi), width=.3, position=position_dodge(0.3)) + 
  ylab("Mean Trait Level\n(Model-Based, 99% CI)") + 
  ylim(3.5, 4.5) + 
  theme_bw() +
  theme(panel.grid.minor = element_blank(),
        strip.background=element_rect(fill="grey93"))

Plotting model based mean-level changes: Conscientiousness

Show the code
ggplot(b5_latent_df_plot %>% filter(Trait=="Conscientiousness"), 
       aes(y = estimate, x = Time)) + 
  geom_point(aes(shape = Group, group = Group, color = Group), position = position_dodge(0.3)) +
  geom_line(aes(group = Group, color = Group, linetype = Group), position = position_dodge(0.3)) +
  scale_shape_manual(values=c(17, 19, 15)) + 
  geom_errorbar(aes(group = Group, color = Group, ymin = cilow, ymax = cihi), width=.3, position=position_dodge(0.3)) + 
  ylab("Mean Trait Level\n(Model-Based, 99% CI)") + 
  ylim(2.75, 3.75) + 
  theme_bw() +
  theme(panel.grid.minor = element_blank(),
        strip.background=element_rect(fill="grey93"))

Plotting model based mean-level changes: Neuroticism

Show the code
ggplot(b5_latent_df_plot %>% filter(Trait=="Neuroticism"), 
       aes(y = estimate, x = Time)) + 
  geom_point(aes(shape = Group, group = Group, color = Group), position = position_dodge(0.3)) +
  geom_line(aes(group = Group, color = Group, linetype = Group), position = position_dodge(0.3)) +
  scale_shape_manual(values=c(17, 19, 15)) + 
  geom_errorbar(aes(group = Group, color = Group, ymin = cilow, ymax = cihi), width=.3, position=position_dodge(0.3)) + 
  ylab("Mean Trait Level\n(Model-Based, 99% CI)") + 
  ylim(2.5, 3.5) + 
  theme_bw() +
  theme(panel.grid.minor = element_blank(),
        strip.background=element_rect(fill="grey93"))

Plotting model based mean-level changes: Openness

Show the code
ggplot(b5_latent_df_plot %>% filter(Trait=="Openness"), 
       aes(y = estimate, x = Time)) + 
  geom_point(aes(shape = Group, group = Group, color = Group), position = position_dodge(0.3)) +
  geom_line(aes(group = Group, color = Group, linetype = Group), position = position_dodge(0.3)) +
  scale_shape_manual(values=c(17, 19, 15)) + 
  geom_errorbar(aes(group = Group, color = Group, ymin = cilow, ymax = cihi), width=.3, position=position_dodge(0.3)) + 
  ylab("Mean Trait Level\n(Model-Based, 99% CI)") + 
  ylim(3, 4) + 
  theme_bw() +
  theme(panel.grid.minor = element_blank(),
        strip.background=element_rect(fill="grey93"))

7.4.2 Well-being

Gather all latent intercepts at T1 and latent change scores:

Show the code
wb_latent_df_plot <- bind_rows(
  broom::tidy(lcs_swls_group_12_weak, conf.int = T, conf.level = .99) %>% select(term, label, estimate, std.error, conf.low, conf.high) %>% 
    filter(term %in% c("swls_t1 ~1 ", "d_swls_1 ~1 ")) %>% 
    mutate(Trait = "Life Satisfaction", Group = ifelse(label %in% c("d_int_a", "wb_int_a"), "Group 1:\nDiscrepancy awareness\n", "Group 2:\nDemand effects\n")),
  broom::tidy(lcs_swls_group_23_weak, conf.int = T, conf.level = .99) %>% select(term, label, estimate, std.error, conf.low, conf.high) %>% 
    filter(term %in% c("swls_t1 ~1 ", "d_swls_1 ~1 ") & label %in% c("d_int_b", "wb_int_b")) %>% 
    mutate(Trait = "Life Satisfaction", Group = "Group 3:\nDevelopmental trends\n"),
  broom::tidy(lcs_meaning_group_12_weak, conf.int = T, conf.level = .99) %>% select(term, label, estimate, std.error, conf.low, conf.high) %>% 
    filter(term %in% c("meaning_t1 ~1 ", "d_meaning_1 ~1 ")) %>% 
    mutate(Trait = "Meaning in Life", Group = ifelse(label %in% c("d_int_a", "wb_int_a"), "Group 1:\nDiscrepancy awareness\n", "Group 2:\nDemand effects\n")),
  broom::tidy(lcs_meaning_group_23_weak, conf.int = T, conf.level = .99) %>% select(term, label, estimate, std.error, conf.low, conf.high) %>% 
    filter(term %in% c("meaning_t1 ~1 ", "d_meaning_1 ~1 ") & label %in% c("d_int_b", "wb_int_b")) %>% 
    mutate(Trait = "Meaning in Life", Group = "Group 3:\nDevelopmental trends\n"),
  broom::tidy(lcs_search_group_12_weak, conf.int = T, conf.level = .99) %>% select(term, label, estimate, std.error, conf.low, conf.high) %>% 
    filter(term %in% c("search_t1 ~1 ", "d_search_1 ~1 ")) %>% 
    mutate(Trait = "Search for Meaning", Group = ifelse(label %in% c("d_int_a", "wb_int_a"), "Group 1:\nDiscrepancy awareness\n", "Group 2:\nDemand effects\n")),
  broom::tidy(lcs_search_group_23_weak, conf.int = T, conf.level = .99) %>% select(term, label, estimate, std.error, conf.low, conf.high) %>% 
    filter(term %in% c("search_t1 ~1 ", "d_search_1 ~1 ") & label %in% c("d_int_b", "wb_int_b")) %>% 
    mutate(Trait = "Search for Meaning", Group = "Group 3:\nDevelopmental trends\n"),
  broom::tidy(lcs_selfes_group_12_weak, conf.int = T, conf.level = .99) %>% select(term, label, estimate, std.error, conf.low, conf.high) %>% 
    filter(term %in% c("selfes_t1 ~1 ", "d_selfes_1 ~1 ")) %>% 
    mutate(Trait = "Self-Esteem", Group = ifelse(label %in% c("d_int_a", "wb_int_a"), "Group 1:\nDiscrepancy awareness\n", "Group 2:\nDemand effects\n")),
  broom::tidy(lcs_selfes_group_23_weak, conf.int = T, conf.level = .99) %>% select(term, label, estimate, std.error, conf.low, conf.high) %>% 
    filter(term %in% c("selfes_t1 ~1 ", "d_selfes_1 ~1 ") & label %in% c("d_int_b", "wb_int_b")) %>% 
    mutate(Trait = "Self-Esteem", Group = "Group 3:\nDevelopmental trends\n"),
  broom::tidy(lcs_concept_group_12_weak, conf.int = T, conf.level = .99) %>% select(term, label, estimate, std.error, conf.low, conf.high) %>% 
    filter(term %in% c("concept_t1 ~1 ", "d_concept_1 ~1 ")) %>% 
    mutate(Trait = "Self-Concept Clarity", Group = ifelse(label %in% c("d_int_a", "wb_int_a"), "Group 1:\nDiscrepancy awareness\n", "Group 2:\nDemand effects\n")),
  broom::tidy(lcs_concept_group_23_weak, conf.int = T, conf.level = .99) %>% select(term, label, estimate, std.error, conf.low, conf.high) %>% 
    filter(term %in% c("concept_t1 ~1 ", "d_concept_1 ~1 ") & label %in% c("d_int_b", "wb_int_b")) %>% 
    mutate(Trait = "Self-Concept Clarity", Group = "Group 3:\nDevelopmental trends\n"))

# select and mutate relevant vars
wb_latent_df_plot <- wb_latent_df_plot %>% 
  mutate(param = ifelse(label %in% c("d_int_a", "d_int_b"), "slope", "intercept")) %>% 
  select(Trait, Group, estimate, std.error, param, conf.low, conf.high) %>% 
  pivot_wider(names_from = param,
              names_sep = "_",
              values_from = -c(Trait, Group, param)) %>% 
  mutate(T2_estimate = estimate_intercept + estimate_slope, T1_estimate = estimate_intercept,
         T2_cilow = T2_estimate - qnorm(0.995)*(sqrt(std.error_intercept^2 + std.error_slope^2)), 
         T2_cihi  = T2_estimate + qnorm(0.995)*(sqrt(std.error_intercept^2 + std.error_slope^2))) %>% 
  rename(T1_cilow = conf.low_intercept, T1_cihi = conf.high_intercept) %>% 
  select(-c(estimate_slope, estimate_intercept, conf.low_slope, conf.high_slope, std.error_slope, std.error_intercept))

# reshape
wb_latent_df_plot <- wb_latent_df_plot %>% 
  pivot_longer(-c(Trait, Group),
               names_to = c("Time", ".value"),
               names_pattern = "T(\\d)_(.*)") %>% 
  mutate(Time = factor(str_c("T", Time), levels = c("T1", "T2"), labels = c("T1", "T2"), ordered = T))

Plot changes (unstandardized effect size metric): all together

Show the code
ggplot(wb_latent_df_plot %>% filter(Trait!="Search for Meaning"), 
       aes(y = estimate, x = Time)) + 
  geom_point(aes(shape = Group, group = Group, color = Group), position = position_dodge(0.3)) +
  geom_line(aes(group = Group, color = Group, linetype = Group), position = position_dodge(0.3)) +
  scale_shape_manual(values=c(17, 19, 15)) + 
  geom_errorbar(aes(group = Group, color = Group, ymin = cilow, ymax = cihi), width=.3, position=position_dodge(0.3)) + 
  ylab("Mean Trait Level\n(Model-Based, 99% CI)") + 
  facet_wrap( ~ Trait, ncol = 2) +
  theme_bw() +
  theme(panel.grid.minor = element_blank(),
        strip.background=element_rect(fill="grey93"))

Plotting model based mean-level changes: Life Satisfaction

Show the code
ggplot(wb_latent_df_plot %>% filter(Trait=="Life Satisfaction"), 
       aes(y = estimate, x = Time)) + 
  geom_point(aes(shape = Group, group = Group, color = Group), position = position_dodge(0.3)) +
  geom_line(aes(group = Group, color = Group, linetype = Group), position = position_dodge(0.3)) +
  scale_shape_manual(values=c(17, 19, 15)) + 
  geom_errorbar(aes(group = Group, color = Group, ymin = cilow, ymax = cihi), width=.3, position=position_dodge(0.3)) + 
  ylab("Mean Trait Level\n(Model-Based, 99% CI)") + 
  ylim(2, 3.5) + 
  theme_bw() +
  theme(panel.grid.minor = element_blank(),
        strip.background=element_rect(fill="grey93"))

Plotting model based mean-level changes: Meaning in Life

Show the code
ggplot(wb_latent_df_plot %>% filter(Trait=="Meaning in Life"), 
       aes(y = estimate, x = Time)) + 
  geom_point(aes(shape = Group, group = Group, color = Group), position = position_dodge(0.3)) +
  geom_line(aes(group = Group, color = Group, linetype = Group), position = position_dodge(0.3)) +
  scale_shape_manual(values=c(17, 19, 15)) + 
  geom_errorbar(aes(group = Group, color = Group, ymin = cilow, ymax = cihi), width=.3, position=position_dodge(0.3)) + 
  ylab("Mean Trait Level\n(Model-Based, 99% CI)") + 
  ylim(3.5, 5) + 
  theme_bw() +
  theme(panel.grid.minor = element_blank(),
        strip.background=element_rect(fill="grey93"))

Plotting model based mean-level changes: Search for Meaning

Show the code
ggplot(wb_latent_df_plot %>% filter(Trait=="Search for Meaning"), 
       aes(y = estimate, x = Time)) + 
  geom_point(aes(shape = Group, group = Group, color = Group), position = position_dodge(0.3)) +
  geom_line(aes(group = Group, color = Group, linetype = Group), position = position_dodge(0.3)) +
  scale_shape_manual(values=c(17, 19, 15)) + 
  geom_errorbar(aes(group = Group, color = Group, ymin = cilow, ymax = cihi), width=.3, position=position_dodge(0.3)) + 
  ylab("Mean Trait Level\n(Model-Based, 99% CI)") + 
  ylim(4, 6) + 
  theme_bw() +
  theme(panel.grid.minor = element_blank(),
        strip.background=element_rect(fill="grey93"))

Plotting model based mean-level changes: Self-Esteem

Show the code
ggplot(wb_latent_df_plot %>% filter(Trait=="Self-Esteem"), 
       aes(y = estimate, x = Time)) + 
  geom_point(aes(shape = Group, group = Group, color = Group), position = position_dodge(0.3)) +
  geom_line(aes(group = Group, color = Group, linetype = Group), position = position_dodge(0.3)) +
  scale_shape_manual(values=c(17, 19, 15)) + 
  geom_errorbar(aes(group = Group, color = Group, ymin = cilow, ymax = cihi), width=.3, position=position_dodge(0.3)) + 
  ylab("Mean Trait Level\n(Model-Based, 99% CI)") + 
  ylim(2.5, 4) + 
  theme_bw() +
  theme(panel.grid.minor = element_blank(),
        strip.background=element_rect(fill="grey93"))

Plotting model based mean-level changes: Self-Concept Clarity

Show the code
ggplot(wb_latent_df_plot %>% filter(Trait=="Self-Concept Clarity"), 
       aes(y = estimate, x = Time)) + 
  geom_point(aes(shape = Group, group = Group, color = Group), position = position_dodge(0.3)) +
  geom_line(aes(group = Group, color = Group, linetype = Group), position = position_dodge(0.3)) +
  scale_shape_manual(values=c(17, 19, 15)) + 
  geom_errorbar(aes(group = Group, color = Group, ymin = cilow, ymax = cihi), width=.3, position=position_dodge(0.3)) + 
  ylab("Mean Trait Level\n(Model-Based, 99% CI)") + 
  ylim(2.5, 4) + 
  theme_bw() +
  theme(panel.grid.minor = element_blank(),
        strip.background=element_rect(fill="grey93"))

8 Robustness check (MLMs)

Do changes in current personality trait change depend on the study group? Using multilevel change models instead of SEM latent change score models.

8.1 Group 1 vs. Group 2: Effect of discrepancy awareness

Changes in current personality traits over time:

Show the code
df_manip_check_curr_12 <- df_sbsa3 %>% 
  filter(group!="Group 3") %>% 
  mutate(time_d = time - 1) %>% 
  select(pid, time_d, group, 
         paste0(str_trunc(names(b5_vars), 5, ellipsis = ""), "_comb_curr")) %>% 
  pivot_longer(ends_with("_comb_curr"), 
               names_to = "test", names_prefix = "facet", values_to = "score", values_drop_na = TRUE) %>% 
  mutate(group_1 = ifelse(group=="Group 1", 1, 0)) %>% 
  group_nest(test) %>% 
  mutate(lmer_mods = map(data, ~lmerTest::lmer(score ~ time_d * group_1 + (1 | pid), data = .x))) %>% 
  pull(lmer_mods) %>% 
  purrr::set_names(sort(names(b5_vars))) # ordered alphabetically

df_manip_check_curr_unlist_12 <- as.data.frame(summary(df_manip_check_curr_12[[1]])$coefficients) %>% as_tibble()
for (i in 2:length(df_manip_check_curr_12)) {
  df_manip_check_curr_unlist_12 <- bind_rows(df_manip_check_curr_unlist_12, 
                                          as.data.frame(summary(df_manip_check_curr_12[[i]])$coefficients) %>% as_tibble())
}
kable(df_manip_check_curr_unlist_12 %>% 
        mutate(outcome = rep(sort(names(b5_vars)), each=4), 
               term = c(rep(c("Intercept", "time", "group_1", "time*group_1"), 20))) %>% 
        rename(p = `Pr(>|t|)`, str_err = `Std. Error`, est = Estimate) %>% select(outcome, term, est, str_err, p) %>% 
        mutate(sig = ifelse(p < .05, ifelse(p < .01, ifelse(p < .001, "***", "**"), "*"), " ")) %>% 
        arrange(factor(outcome, levels = (names(b5_vars)))), # order by BFI traits, 
      digits = 3)
outcome term est str_err p sig
extraversion Intercept 2.921 0.056 0.000 ***
extraversion time 0.090 0.031 0.003 **
extraversion group_1 -0.089 0.080 0.266
extraversion time*group_1 -0.014 0.043 0.743
agreeableness Intercept 3.615 0.043 0.000 ***
agreeableness time 0.052 0.034 0.125
agreeableness group_1 0.072 0.061 0.237
agreeableness time*group_1 -0.002 0.048 0.961
conscientiousness Intercept 3.364 0.055 0.000 ***
conscientiousness time 0.063 0.032 0.048 *
conscientiousness group_1 -0.034 0.078 0.657
conscientiousness time*group_1 0.036 0.045 0.424
neuroticism Intercept 3.263 0.063 0.000 ***
neuroticism time -0.143 0.035 0.000 ***
neuroticism group_1 -0.074 0.090 0.412
neuroticism time*group_1 0.087 0.049 0.078
openness Intercept 3.704 0.048 0.000 ***
openness time 0.055 0.029 0.061
openness group_1 0.027 0.068 0.687
openness time*group_1 -0.028 0.041 0.497
sociability Intercept 2.634 0.075 0.000 ***
sociability time 0.140 0.041 0.001 ***
sociability group_1 -0.179 0.106 0.093
sociability time*group_1 -0.008 0.058 0.886
assertiveness Intercept 2.966 0.067 0.000 ***
assertiveness time 0.025 0.045 0.584
assertiveness group_1 -0.041 0.095 0.663
assertiveness time*group_1 -0.008 0.064 0.903
energy Intercept 3.162 0.064 0.000 ***
energy time 0.106 0.046 0.022 *
energy group_1 -0.046 0.091 0.611
energy time*group_1 -0.029 0.065 0.650
compassion Intercept 3.779 0.053 0.000 ***
compassion time -0.014 0.051 0.779
compassion group_1 0.020 0.075 0.792
compassion time*group_1 0.134 0.072 0.064
respectfulness Intercept 3.955 0.051 0.000 ***
respectfulness time 0.062 0.043 0.145
respectfulness group_1 0.099 0.072 0.171
respectfulness time*group_1 -0.050 0.060 0.401
trust Intercept 3.112 0.060 0.000 ***
trust time 0.114 0.048 0.020 *
trust group_1 0.099 0.085 0.243
trust time*group_1 -0.090 0.068 0.190
organization Intercept 3.446 0.075 0.000 ***
organization time 0.045 0.047 0.340
organization group_1 -0.043 0.107 0.689
organization time*group_1 0.036 0.067 0.590
productiveness Intercept 3.161 0.065 0.000 ***
productiveness time 0.103 0.048 0.033 *
productiveness group_1 -0.132 0.093 0.156
productiveness time*group_1 0.038 0.068 0.580
responsibility Intercept 3.486 0.057 0.000 ***
responsibility time 0.049 0.044 0.265
responsibility group_1 0.072 0.081 0.379
responsibility time*group_1 0.025 0.062 0.693
anxiety Intercept 3.602 0.069 0.000 ***
anxiety time -0.090 0.046 0.049 *
anxiety group_1 -0.069 0.098 0.484
anxiety time*group_1 0.065 0.064 0.316
depression Intercept 3.176 0.073 0.000 ***
depression time -0.216 0.046 0.000 ***
depression group_1 -0.086 0.104 0.413
depression time*group_1 0.141 0.065 0.032 *
volatility Intercept 3.011 0.074 0.000 ***
volatility time -0.127 0.053 0.017 *
volatility group_1 -0.067 0.106 0.527
volatility time*group_1 0.060 0.074 0.422
curiosity Intercept 3.880 0.054 0.000 ***
curiosity time 0.075 0.040 0.059
curiosity group_1 -0.008 0.077 0.914
curiosity time*group_1 -0.030 0.056 0.597
aesthetic Intercept 3.599 0.062 0.000 ***
aesthetic time 0.025 0.046 0.589
aesthetic group_1 -0.008 0.089 0.930
aesthetic time*group_1 -0.008 0.065 0.898
imagination Intercept 3.633 0.060 0.000 ***
imagination time 0.069 0.046 0.137
imagination group_1 0.100 0.086 0.246
imagination time*group_1 -0.048 0.066 0.464

No significant group_1 effects (higher trait level in Group 1 at baseline) and also no significant differences in the changes over time in current personality trait levels (timeXgroup) at p < .01 (only for depression at p < .05). Significant changes over time in current personality trait levels (time) for extraversion (also sociability) and neuroticism (also depression.

Plot of main effects: extraversion and neuroticism

Show the code
ggeffects::ggpredict(df_manip_check_curr_12$extraversion, 
                     terms = c("time_d[0,1]", "group_1[0,1]")) |> plot() +
  aes(linetype=group, color=group, shape=group) + geom_point() +
  theme_light() +
  scale_x_continuous(breaks = c(0,1), labels = c("T1", "T2")) +
  theme(panel.grid.minor.x = element_blank()) + 
  labs(title = "Extraversion", x = "Time", y = "Predicted values of\nextraversion",
       linetype="Group\n(0 = Group 2,\n1 = Group 1)", color="Group\n(0 = Group 2,\n1 = Group 1)", 
       shape="Group\n(0 = Group 2,\n1 = Group 1)")

Show the code
ggeffects::ggpredict(df_manip_check_curr_12$neuroticism, 
                     terms = c("time_d[0,1]", "group_1[0,1]")) |> plot() +
  aes(linetype=group, color=group, shape=group) + geom_point() +
  theme_light() +
  scale_x_continuous(breaks = c(0,1), labels = c("T1", "T2")) +
  theme(panel.grid.minor.x = element_blank()) + 
  labs(title = "Neuroticism", x = "Time", y = "Predicted values of\nneuroticism",
       linetype="Group\n(0 = Group 2,\n1 = Group 1)", color="Group\n(0 = Group 2,\n1 = Group 1)", 
       shape="Group\n(0 = Group 2,\n1 = Group 1)")

Group interaction effect: Depression (but only at p < .05)

Show the code
ggeffects::ggpredict(df_manip_check_curr_12$depression, 
                     terms = c("time_d[0,1]", "group_1[0,1]")) |> plot() +
  aes(linetype=group, color=group, shape=group) + geom_point() +
  theme_light() +
  scale_x_continuous(breaks = c(0,1), labels = c("T1", "T2")) +
  theme(panel.grid.minor.x = element_blank()) + 
  labs(title = "Depression", x = "Time", y = "Predicted values of\ndepression",
       linetype="Group\n(0 = Group 2,\n1 = Group 1)", color="Group\n(0 = Group 2,\n1 = Group 1)", 
       shape="Group\n(0 = Group 2,\n1 = Group 1)")

Changes in ideal personality traits over time:

Show the code
df_manip_check_ideal <- df_sbsa3 %>% 
  filter(group=="Group 1") %>% 
  mutate(time_d = time - 1) %>% 
  select(pid, time_d, 
         paste0(str_trunc(names(b5_vars), 5, ellipsis = ""), "_comb_ideal")) %>% 
  pivot_longer(ends_with("_comb_ideal"), 
               names_to = "test", names_prefix = "facet", values_to = "score", values_drop_na = TRUE) %>% 
  group_nest(test) %>% 
  mutate(lmer_mods = map(data, ~lmerTest::lmer(score ~ time_d + (1 | pid), data = .x))) %>% 
  pull(lmer_mods) %>% 
  purrr::set_names(sort(names(b5_vars))) # ordered alphabetically

df_manip_check_ideal_unlist <- as.data.frame(summary(df_manip_check_ideal[[1]])$coefficients) %>% as_tibble()
for (i in 2:length(df_manip_check_ideal)) {
  df_manip_check_ideal_unlist <- bind_rows(df_manip_check_ideal_unlist, 
                                          as.data.frame(summary(df_manip_check_ideal[[i]])$coefficients) %>% as_tibble())
}
kable(df_manip_check_ideal_unlist %>% 
        mutate(outcome = rep(sort(names(b5_vars)), each=2), 
               term = c(rep(c("Intercept", "time"), 20))) %>% 
        rename(p = `Pr(>|t|)`, str_err = `Std. Error`, est = Estimate) %>% select(outcome, term, est, str_err, p) %>% 
        mutate(sig = ifelse(p < .05, ifelse(p < .01, ifelse(p < .001, "***", "**"), "*"), " ")) %>% 
        arrange(factor(outcome, levels = (names(b5_vars)))), # order by BFI traits, 
      digits = 3)
outcome term est str_err p sig
extraversion Intercept 3.917 0.039 0.000 ***
extraversion time -0.102 0.036 0.005 **
agreeableness Intercept 4.069 0.040 0.000 ***
agreeableness time -0.023 0.033 0.473
conscientiousness Intercept 4.391 0.034 0.000 ***
conscientiousness time 0.005 0.033 0.886
neuroticism Intercept 1.802 0.036 0.000 ***
neuroticism time 0.031 0.034 0.370
openness Intercept 4.115 0.042 0.000 ***
openness time 0.015 0.035 0.675
sociability Intercept 3.647 0.056 0.000 ***
sociability time -0.125 0.053 0.019 *
assertiveness Intercept 3.889 0.052 0.000 ***
assertiveness time -0.152 0.047 0.002 **
energy Intercept 4.217 0.044 0.000 ***
energy time -0.031 0.049 0.521
compassion Intercept 4.017 0.052 0.000 ***
compassion time 0.021 0.055 0.708
respectfulness Intercept 4.462 0.043 0.000 ***
respectfulness time -0.053 0.042 0.209
trust Intercept 3.728 0.055 0.000 ***
trust time -0.037 0.049 0.457
organization Intercept 4.414 0.043 0.000 ***
organization time 0.008 0.048 0.874
productiveness Intercept 4.506 0.041 0.000 ***
productiveness time -0.013 0.047 0.775
responsibility Intercept 4.254 0.045 0.000 ***
responsibility time 0.024 0.044 0.585
anxiety Intercept 1.902 0.047 0.000 ***
anxiety time 0.080 0.054 0.144
depression Intercept 1.511 0.041 0.000 ***
depression time 0.033 0.041 0.425
volatility Intercept 1.991 0.045 0.000 ***
volatility time -0.024 0.048 0.614
curiosity Intercept 4.151 0.050 0.000 ***
curiosity time -0.033 0.050 0.509
aesthetic Intercept 3.863 0.053 0.000 ***
aesthetic time 0.078 0.050 0.126
imagination Intercept 4.334 0.053 0.000 ***
imagination time 0.002 0.056 0.968

Significant changes over time in ideal personality trait levels (time) for extraversion and assertiveness (decrease). Stable otherwise.

8.2 Group 2 vs. Group 3: Effect of demand characteristics

Changes in current personality traits over time:

Show the code
df_manip_check_curr_23 <- df_sbsa3 %>% 
  filter(group!="Group 1") %>% 
  mutate(time_d = time - 1) %>% 
  select(pid, time_d, group, 
         paste0(str_trunc(names(b5_vars), 5, ellipsis = ""), "_comb_curr")) %>% 
  pivot_longer(ends_with("_comb_curr"), 
               names_to = "test", names_prefix = "facet", values_to = "score", values_drop_na = TRUE) %>% 
  mutate(group_2 = ifelse(group=="Group 2", 1, 0)) %>% 
  group_nest(test) %>% 
  mutate(lmer_mods = map(data, ~lmerTest::lmer(score ~ time_d * group_2 + (1 | pid), data = .x))) %>% 
  pull(lmer_mods) %>% 
  purrr::set_names(sort(names(b5_vars))) # ordered alphabetically

df_manip_check_curr_unlist_23 <- as.data.frame(summary(df_manip_check_curr_23[[1]])$coefficients) %>% as_tibble()
for (i in 2:length(df_manip_check_curr_23)) {
  df_manip_check_curr_unlist_23 <- bind_rows(df_manip_check_curr_unlist_23, 
                                          as.data.frame(summary(df_manip_check_curr_23[[i]])$coefficients) %>% as_tibble())
}
kable(df_manip_check_curr_unlist_23 %>% 
        mutate(outcome = rep(sort(names(b5_vars)), each=4), 
               term = c(rep(c("Intercept", "time", "group_2", "time*group_2"), 20))) %>% 
        rename(p = `Pr(>|t|)`, str_err = `Std. Error`, est = Estimate) %>% select(outcome, term, est, str_err, p) %>% 
        mutate(sig = ifelse(p < .05, ifelse(p < .01, ifelse(p < .001, "***", "**"), "*"), " ")) %>% 
        arrange(factor(outcome, levels = (names(b5_vars)))), # order by BFI traits, 
      digits = 3)
outcome term est str_err p sig
extraversion Intercept 3.069 0.058 0.000 ***
extraversion time -0.036 0.029 0.207
extraversion group_2 -0.148 0.081 0.070
extraversion time*group_2 0.127 0.041 0.002 **
agreeableness Intercept 3.689 0.046 0.000 ***
agreeableness time -0.005 0.030 0.876
agreeableness group_2 -0.073 0.065 0.256
agreeableness time*group_2 0.056 0.043 0.201
conscientiousness Intercept 3.543 0.056 0.000 ***
conscientiousness time 0.042 0.028 0.139
conscientiousness group_2 -0.179 0.078 0.023 *
conscientiousness time*group_2 0.021 0.040 0.605
neuroticism Intercept 3.032 0.067 0.000 ***
neuroticism time 0.014 0.031 0.648
neuroticism group_2 0.231 0.095 0.015 *
neuroticism time*group_2 -0.157 0.045 0.001 ***
openness Intercept 3.834 0.049 0.000 ***
openness time -0.008 0.024 0.749
openness group_2 -0.130 0.070 0.063
openness time*group_2 0.063 0.035 0.075
sociability Intercept 2.696 0.078 0.000 ***
sociability time 0.014 0.039 0.720
sociability group_2 -0.062 0.109 0.571
sociability time*group_2 0.126 0.056 0.025 *
assertiveness Intercept 3.173 0.068 0.000 ***
assertiveness time -0.061 0.041 0.144
assertiveness group_2 -0.207 0.096 0.032 *
assertiveness time*group_2 0.086 0.059 0.148
energy Intercept 3.337 0.067 0.000 ***
energy time -0.062 0.045 0.167
energy group_2 -0.175 0.095 0.065
energy time*group_2 0.167 0.064 0.009 **
compassion Intercept 3.807 0.057 0.000 ***
compassion time -0.018 0.048 0.710
compassion group_2 -0.027 0.081 0.733
compassion time*group_2 0.001 0.068 0.989
respectfulness Intercept 4.009 0.051 0.000 ***
respectfulness time 0.036 0.040 0.365
respectfulness group_2 -0.053 0.072 0.461
respectfulness time*group_2 0.026 0.057 0.652
trust Intercept 3.251 0.062 0.000 ***
trust time -0.032 0.042 0.457
trust group_2 -0.140 0.088 0.112
trust time*group_2 0.143 0.061 0.019 *
organization Intercept 3.685 0.074 0.000 ***
organization time -0.049 0.041 0.238
organization group_2 -0.239 0.104 0.022 *
organization time*group_2 0.093 0.059 0.114
productiveness Intercept 3.345 0.066 0.000 ***
productiveness time 0.139 0.042 0.001 **
productiveness group_2 -0.185 0.092 0.047 *
productiveness time*group_2 -0.038 0.060 0.529
responsibility Intercept 3.598 0.056 0.000 ***
responsibility time 0.033 0.041 0.421
responsibility group_2 -0.112 0.079 0.157
responsibility time*group_2 0.016 0.058 0.785
anxiety Intercept 3.412 0.073 0.000 ***
anxiety time 0.035 0.042 0.405
anxiety group_2 0.190 0.102 0.064
anxiety time*group_2 -0.124 0.060 0.038 *
depression Intercept 2.827 0.077 0.000 ***
depression time 0.022 0.043 0.612
depression group_2 0.349 0.108 0.001 **
depression time*group_2 -0.237 0.062 0.000 ***
volatility Intercept 2.857 0.077 0.000 ***
volatility time -0.016 0.046 0.735
volatility group_2 0.155 0.109 0.157
volatility time*group_2 -0.110 0.066 0.097
curiosity Intercept 4.006 0.055 0.000 ***
curiosity time -0.001 0.034 0.973
curiosity group_2 -0.125 0.077 0.106
curiosity time*group_2 0.075 0.048 0.120
aesthetic Intercept 3.722 0.065 0.000 ***
aesthetic time -0.010 0.038 0.782
aesthetic group_2 -0.122 0.091 0.179
aesthetic time*group_2 0.035 0.054 0.520
imagination Intercept 3.774 0.063 0.000 ***
imagination time -0.008 0.043 0.860
imagination group_2 -0.141 0.089 0.112
imagination time*group_2 0.076 0.061 0.216

One significant group_2 effect: higher depression trait level in Group 2 at baseline compared to Group 3. Significant differences in the changes over time in current personality trait levels (timeXgroup) for extraversion, neuroticism, and depression (at p < .01). Significant changes over time in current personality trait levels (main effects of time) for productiveness (independent of group membership).

Plot of main effects: productiveness

Show the code
ggeffects::ggpredict(df_manip_check_curr_23$productiveness, 
                     terms = c("time_d[0,1]", "group_2[0,1]")) |> plot() +
  aes(linetype=group, color=group, shape=group) + geom_point() +
  theme_light() +
  scale_x_continuous(breaks = c(0,1), labels = c("T1", "T2")) +
  theme(panel.grid.minor.x = element_blank()) + 
  labs(title = "Productiveness", x = "Time", y = "Predicted values of\nproductiveness",
       linetype="Group\n(0 = Group 3,\n1 = Group 2)", color="Group\n(0 = Group 3,\n1 = Group 2)", 
       shape="Group\n(0 = Group 3,\n1 = Group 2)")

Group interaction effects: extraversion, neuroticism, and depression

Show the code
ggeffects::ggpredict(df_manip_check_curr_23$extraversion, 
                     terms = c("time_d[0,1]", "group_2[0,1]")) |> plot() +
  aes(linetype=group, color=group, shape=group) + geom_point() +
  theme_light() +
  scale_x_continuous(breaks = c(0,1), labels = c("T1", "T2")) +
  theme(panel.grid.minor.x = element_blank()) + 
  labs(title = "Extraversion", x = "Time", y = "Predicted values of\nextraversion",
       linetype="Group\n(0 = Group 3,\n1 = Group 2)", color="Group\n(0 = Group 3,\n1 = Group 2)", 
       shape="Group\n(0 = Group 3,\n1 = Group 2)")

Show the code
ggeffects::ggpredict(df_manip_check_curr_23$neuroticism, 
                     terms = c("time_d[0,1]", "group_2[0,1]")) |> plot() +
  aes(linetype=group, color=group, shape=group) + geom_point() +
  theme_light() +
  scale_x_continuous(breaks = c(0,1), labels = c("T1", "T2")) +
  theme(panel.grid.minor.x = element_blank()) + 
  labs(title = "Neuroticism", x = "Time", y = "Predicted values of\nneuroticism",
       linetype="Group\n(0 = Group 3,\n1 = Group 2)", color="Group\n(0 = Group 3,\n1 = Group 2)", 
       shape="Group\n(0 = Group 3,\n1 = Group 2)")

Show the code
ggeffects::ggpredict(df_manip_check_curr_23$depression, 
                     terms = c("time_d[0,1]", "group_2[0,1]")) |> plot() +
  aes(linetype=group, color=group, shape=group) + geom_point() +
  theme_light() +
  scale_x_continuous(breaks = c(0,1), labels = c("T1", "T2")) +
  theme(panel.grid.minor.x = element_blank()) + 
  labs(title = "Depression", x = "Time", y = "Predicted values of\ndepression",
       linetype="Group\n(0 = Group 3,\n1 = Group 2)", color="Group\n(0 = Group 3,\n1 = Group 2)", 
       shape="Group\n(0 = Group 3,\n1 = Group 2)")

8.3 Initial group differences at T1

8.3.1 Big Five personality traits

df_sbsa3 <- df_sbsa3 %>% mutate(interv = ifelse(group=="Group 3", 0, 1))
# extra
psych::describeBy(df_sbsa3 %>% filter(time==1) %>% pull(extra_comb_curr), 
                  group = df_sbsa3 %>% filter(time==1) %>% pull(interv))

 Descriptive statistics by group 
group: 0
   vars   n mean   sd median trimmed mad min max range skew kurtosis   se
X1    1 176 3.07 0.76   3.08    3.05 0.8   1   5     4 0.15    -0.46 0.06
------------------------------------------------------------ 
group: 1
   vars   n mean   sd median trimmed  mad min max range skew kurtosis   se
X1    1 353 2.88 0.75   2.83    2.86 0.74   1   5     4 0.18    -0.26 0.04
t.test(extra_comb_curr ~ interv, data = df_sbsa3 %>% filter(time==1), paired = F, conf.level = 0.99)

    Welch Two Sample t-test

data:  extra_comb_curr by interv
t = 2.7368, df = 347.5, p-value = 0.006522
alternative hypothesis: true difference in means between group 0 and group 1 is not equal to 0
99 percent confidence interval:
 0.01026637 0.37255887
sample estimates:
mean in group 0 mean in group 1 
       3.068655        2.877243 
cohen.d(extra_comb_curr ~ interv, data = df_sbsa3 %>% filter(time==1), alpha=.01)
Call: cohen.d(x = extra_comb_curr ~ interv, alpha = 0.01, data = df_sbsa3 %>% 
    filter(time == 1))
Cohen d statistic of difference between two means
                lower effect upper
extra_comb_curr -0.49  -0.25 -0.01

Multivariate (Mahalanobis) distance between groups
[1] 0.25
r equivalent of difference between two means
extra_comb_curr 
          -0.12 
# agree
psych::describeBy(df_sbsa3 %>% filter(time==1) %>% pull(agree_comb_curr), 
                  group = df_sbsa3 %>% filter(time==1) %>% pull(interv))

 Descriptive statistics by group 
group: 0
   vars   n mean   sd median trimmed  mad  min max range  skew kurtosis   se
X1    1 176 3.69 0.62   3.71    3.72 0.56 1.75   5  3.25 -0.35     0.06 0.05
------------------------------------------------------------ 
group: 1
   vars   n mean   sd median trimmed  mad  min  max range  skew kurtosis   se
X1    1 353 3.65 0.57   3.67    3.67 0.62 1.75 4.92  3.17 -0.39     0.06 0.03
t.test(agree_comb_curr ~ interv, data = df_sbsa3 %>% filter(time==1), paired = F, conf.level = 0.99)

    Welch Two Sample t-test

data:  agree_comb_curr by interv
t = 0.64373, df = 326.78, p-value = 0.5202
alternative hypothesis: true difference in means between group 0 and group 1 is not equal to 0
99 percent confidence interval:
 -0.1087340  0.1806259
sample estimates:
mean in group 0 mean in group 1 
       3.688920        3.652975 
cohen.d(agree_comb_curr ~ interv, data = df_sbsa3 %>% filter(time==1), alpha=.01)
Call: cohen.d(x = agree_comb_curr ~ interv, alpha = 0.01, data = df_sbsa3 %>% 
    filter(time == 1))
Cohen d statistic of difference between two means
                lower effect upper
agree_comb_curr  -0.3  -0.06  0.18

Multivariate (Mahalanobis) distance between groups
[1] 0.061
r equivalent of difference between two means
agree_comb_curr 
          -0.03 
# consc
psych::describeBy(df_sbsa3 %>% filter(time==1) %>% pull(consc_comb_curr), 
                  group = df_sbsa3 %>% filter(time==1) %>% pull(interv))

 Descriptive statistics by group 
group: 0
   vars   n mean   sd median trimmed  mad  min max range  skew kurtosis   se
X1    1 176 3.54 0.77   3.58    3.58 0.74 1.75   5  3.25 -0.41    -0.45 0.06
------------------------------------------------------------ 
group: 1
   vars   n mean   sd median trimmed  mad  min max range  skew kurtosis   se
X1    1 353 3.35 0.74   3.42    3.37 0.74 1.17   5  3.83 -0.26     -0.2 0.04
t.test(consc_comb_curr ~ interv, data = df_sbsa3 %>% filter(time==1), paired = F, conf.level = 0.99)

    Welch Two Sample t-test

data:  consc_comb_curr by interv
t = 2.7852, df = 338.3, p-value = 0.005651
alternative hypothesis: true difference in means between group 0 and group 1 is not equal to 0
99 percent confidence interval:
 0.0136436 0.3765884
sample estimates:
mean in group 0 mean in group 1 
       3.542614        3.347498 
cohen.d(consc_comb_curr ~ interv, data = df_sbsa3 %>% filter(time==1), alpha=.01)
Call: cohen.d(x = consc_comb_curr ~ interv, alpha = 0.01, data = df_sbsa3 %>% 
    filter(time == 1))
Cohen d statistic of difference between two means
                lower effect upper
consc_comb_curr  -0.5  -0.26 -0.02

Multivariate (Mahalanobis) distance between groups
[1] 0.26
r equivalent of difference between two means
consc_comb_curr 
          -0.12 
# neuro
psych::describeBy(df_sbsa3 %>% filter(time==1) %>% pull(neuro_comb_curr), 
                  group = df_sbsa3 %>% filter(time==1) %>% pull(interv))

 Descriptive statistics by group 
group: 0
   vars   n mean   sd median trimmed  mad  min  max range  skew kurtosis   se
X1    1 176 3.03 0.94      3    3.04 1.11 1.08 4.92  3.83 -0.03    -0.89 0.07
------------------------------------------------------------ 
group: 1
   vars   n mean   sd median trimmed  mad  min max range  skew kurtosis   se
X1    1 353 3.23 0.86   3.25    3.25 0.86 1.25   5  3.75 -0.23    -0.59 0.05
t.test(neuro_comb_curr ~ interv, data = df_sbsa3 %>% filter(time==1), paired = F, conf.level = 0.99)

    Welch Two Sample t-test

data:  neuro_comb_curr by interv
t = -2.3139, df = 322.55, p-value = 0.0213
alternative hypothesis: true difference in means between group 0 and group 1 is not equal to 0
99 percent confidence interval:
 -0.41266814  0.02332946
sample estimates:
mean in group 0 mean in group 1 
       3.031723        3.226393 
cohen.d(neuro_comb_curr ~ interv, data = df_sbsa3 %>% filter(time==1), alpha=.01)
Call: cohen.d(x = neuro_comb_curr ~ interv, alpha = 0.01, data = df_sbsa3 %>% 
    filter(time == 1))
Cohen d statistic of difference between two means
                lower effect upper
neuro_comb_curr -0.02   0.22  0.46

Multivariate (Mahalanobis) distance between groups
[1] 0.22
r equivalent of difference between two means
neuro_comb_curr 
            0.1 
# openn
psych::describeBy(df_sbsa3 %>% filter(time==1) %>% pull(openn_comb_curr), 
                  group = df_sbsa3 %>% filter(time==1) %>% pull(interv))

 Descriptive statistics by group 
group: 0
   vars   n mean   sd median trimmed  mad  min max range  skew kurtosis   se
X1    1 176 3.83 0.64   3.83    3.85 0.62 2.08   5  2.92 -0.23    -0.56 0.05
------------------------------------------------------------ 
group: 1
   vars   n mean   sd median trimmed  mad  min max range  skew kurtosis   se
X1    1 353 3.72 0.64   3.67    3.74 0.62 1.75   5  3.25 -0.33    -0.06 0.03
t.test(openn_comb_curr ~ interv, data = df_sbsa3 %>% filter(time==1), paired = F, conf.level = 0.99)

    Welch Two Sample t-test

data:  openn_comb_curr by interv
t = 1.9307, df = 350.41, p-value = 0.05433
alternative hypothesis: true difference in means between group 0 and group 1 is not equal to 0
99 percent confidence interval:
 -0.03895913  0.26713590
sample estimates:
mean in group 0 mean in group 1 
       3.833807        3.719718 
cohen.d(openn_comb_curr ~ interv, data = df_sbsa3 %>% filter(time==1), alpha=.01)
Call: cohen.d(x = openn_comb_curr ~ interv, alpha = 0.01, data = df_sbsa3 %>% 
    filter(time == 1))
Cohen d statistic of difference between two means
                lower effect upper
openn_comb_curr -0.42  -0.18  0.06

Multivariate (Mahalanobis) distance between groups
[1] 0.18
r equivalent of difference between two means
openn_comb_curr 
          -0.08 

8.3.2 Well-being

# swls
psych::describeBy(df_sbsa3 %>% filter(time==1) %>% pull(swls), 
                  group = df_sbsa3 %>% filter(time==1) %>% pull(interv))

 Descriptive statistics by group 
group: 0
   vars   n mean sd median trimmed  mad min max range  skew kurtosis   se
X1    1 176 2.94  1      3    2.94 1.19   1   5     4 -0.06     -0.8 0.08
------------------------------------------------------------ 
group: 1
   vars   n mean sd median trimmed  mad min max range skew kurtosis   se
X1    1 353 2.69  1    2.6    2.66 1.19   1   5     4 0.18    -0.83 0.05
t.test(swls ~ interv, data = df_sbsa3 %>% filter(time==1), paired = F, conf.level = 0.99)

    Welch Two Sample t-test

data:  swls by interv
t = 2.7266, df = 349.3, p-value = 0.006721
alternative hypothesis: true difference in means between group 0 and group 1 is not equal to 0
99 percent confidence interval:
 0.01260011 0.49016193
sample estimates:
mean in group 0 mean in group 1 
       2.937500        2.686119 
cohen.d(swls ~ interv, data = df_sbsa3 %>% filter(time==1), alpha=.01)
Call: cohen.d(x = swls ~ interv, alpha = 0.01, data = df_sbsa3 %>% 
    filter(time == 1))
Cohen d statistic of difference between two means
     lower effect upper
swls -0.49  -0.25 -0.01

Multivariate (Mahalanobis) distance between groups
[1] 0.25
r equivalent of difference between two means
 swls 
-0.12 
# meaning
psych::describeBy(df_sbsa3 %>% filter(time==1) %>% pull(meaning), 
                  group = df_sbsa3 %>% filter(time==1) %>% pull(interv))

 Descriptive statistics by group 
group: 0
   vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
X1    1 176 4.38 1.53    4.6    4.47 1.48   1   7     6 -0.45    -0.69 0.12
------------------------------------------------------------ 
group: 1
   vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
X1    1 353  4.3 1.46    4.4    4.33 1.78   1   7     6 -0.14    -0.88 0.08
t.test(meaning ~ interv, data = df_sbsa3 %>% filter(time==1), paired = F, conf.level = 0.99)

    Welch Two Sample t-test

data:  meaning by interv
t = 0.52497, df = 336.36, p-value = 0.6
alternative hypothesis: true difference in means between group 0 and group 1 is not equal to 0
99 percent confidence interval:
 -0.2873203  0.4333672
sample estimates:
mean in group 0 mean in group 1 
       4.377273        4.304249 
cohen.d(meaning ~ interv, data = df_sbsa3 %>% filter(time==1), alpha=.01)
Call: cohen.d(x = meaning ~ interv, alpha = 0.01, data = df_sbsa3 %>% 
    filter(time == 1))
Cohen d statistic of difference between two means
        lower effect upper
meaning -0.29  -0.05  0.19

Multivariate (Mahalanobis) distance between groups
[1] 0.049
r equivalent of difference between two means
meaning 
  -0.02 
# search for meaning
psych::describeBy(df_sbsa3 %>% filter(time==1) %>% pull(search), 
                  group = df_sbsa3 %>% filter(time==1) %>% pull(interv))

 Descriptive statistics by group 
group: 0
   vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
X1    1 176 4.84 1.46      5    4.95 1.48   1   7     6 -0.58     -0.2 0.11
------------------------------------------------------------ 
group: 1
   vars   n mean  sd median trimmed  mad min max range  skew kurtosis   se
X1    1 353 4.96 1.4    5.2    5.09 1.19   1   7     6 -0.87     0.49 0.07
t.test(search ~ interv, data = df_sbsa3 %>% filter(time==1), paired = F, conf.level = 0.99)

    Welch Two Sample t-test

data:  search by interv
t = -0.85077, df = 337.67, p-value = 0.3955
alternative hypothesis: true difference in means between group 0 and group 1 is not equal to 0
99 percent confidence interval:
 -0.4578563  0.2314656
sample estimates:
mean in group 0 mean in group 1 
       4.842045        4.955241 
cohen.d(search ~ interv, data = df_sbsa3 %>% filter(time==1), alpha=.01)
Call: cohen.d(x = search ~ interv, alpha = 0.01, data = df_sbsa3 %>% 
    filter(time == 1))
Cohen d statistic of difference between two means
       lower effect upper
search -0.16   0.08  0.32

Multivariate (Mahalanobis) distance between groups
[1] 0.08
r equivalent of difference between two means
search 
  0.04 
# selfes
psych::describeBy(df_sbsa3 %>% filter(time==1) %>% pull(selfes), 
                  group = df_sbsa3 %>% filter(time==1) %>% pull(interv))

 Descriptive statistics by group 
group: 0
   vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
X1    1 176 3.44 0.93    3.5    3.46 1.04 1.2   5   3.8 -0.17    -0.95 0.07
------------------------------------------------------------ 
group: 1
   vars   n mean   sd median trimmed  mad min max range skew kurtosis   se
X1    1 353 3.23 0.86    3.1    3.23 0.89 1.1   5   3.9 0.07    -0.67 0.05
t.test(selfes ~ interv, data = df_sbsa3 %>% filter(time==1), paired = F, conf.level = 0.99)

    Welch Two Sample t-test

data:  selfes by interv
t = 2.5001, df = 323.94, p-value = 0.01291
alternative hypothesis: true difference in means between group 0 and group 1 is not equal to 0
99 percent confidence interval:
 -0.007620598  0.426558919
sample estimates:
mean in group 0 mean in group 1 
       3.442614        3.233144 
cohen.d(selfes ~ interv, data = df_sbsa3 %>% filter(time==1), alpha=.01)
Call: cohen.d(x = selfes ~ interv, alpha = 0.01, data = df_sbsa3 %>% 
    filter(time == 1))
Cohen d statistic of difference between two means
       lower effect upper
selfes -0.48  -0.24     0

Multivariate (Mahalanobis) distance between groups
[1] 0.24
r equivalent of difference between two means
selfes 
 -0.11 
# concept
psych::describeBy(df_sbsa3 %>% filter(time==1) %>% pull(concept), 
                  group = df_sbsa3 %>% filter(time==1) %>% pull(interv))

 Descriptive statistics by group 
group: 0
   vars   n mean   sd median trimmed  mad  min max range skew kurtosis   se
X1    1 176 3.16 0.91   3.17    3.16 1.05 1.25   5  3.75 0.05    -0.91 0.07
------------------------------------------------------------ 
group: 1
   vars   n mean   sd median trimmed  mad  min max range skew kurtosis   se
X1    1 353 3.06 0.85      3    3.03 0.86 1.33   5  3.67 0.32    -0.73 0.05
t.test(concept ~ interv, data = df_sbsa3 %>% filter(time==1), paired = F, conf.level = 0.99)

    Welch Two Sample t-test

data:  concept by interv
t = 1.1862, df = 328.92, p-value = 0.2364
alternative hypothesis: true difference in means between group 0 and group 1 is not equal to 0
99 percent confidence interval:
 -0.1159972  0.3119046
sample estimates:
mean in group 0 mean in group 1 
       3.160985        3.063031 
cohen.d(concept ~ interv, data = df_sbsa3 %>% filter(time==1), alpha=.01)
Call: cohen.d(x = concept ~ interv, alpha = 0.01, data = df_sbsa3 %>% 
    filter(time == 1))
Cohen d statistic of difference between two means
        lower effect upper
concept -0.35  -0.11  0.13

Multivariate (Mahalanobis) distance between groups
[1] 0.11
r equivalent of difference between two means
concept 
  -0.05